[yt-svn] commit/yt: 9 new changesets

commits-noreply at bitbucket.org
Tue Sep 2 15:22:04 PDT 2014


9 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/fdfdb5476aad/
Changeset:   fdfdb5476aad
Branch:      yt
User:        drudd
Date:        2014-08-10 23:19:41
Summary:     Replaced time conversion in ARTIO with new C functions that match CART to machine precision
Affected #:  5 files

diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r fdfdb5476aadcfaceaa6f51e35ff143606124191 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -25,6 +25,33 @@
     cdef extern from "alloca.h":
         void *alloca(int)
 
+cdef extern from "cosmology.h":
+    ctypedef struct CosmologyParameters "CosmologyParameters" :
+        pass
+    CosmologyParameters *cosmology_allocate()
+    void cosmology_free(CosmologyParameters *c)
+    void cosmology_set_fixed(CosmologyParameters *c)
+
+    void cosmology_set_OmegaM(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaB(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaL(CosmologyParameters *c, double value)
+    void cosmology_set_h(CosmologyParameters *c, double value)
+    void cosmology_set_DeltaDC(CosmologyParameters *c, double value)
+
+    double abox_from_auni(CosmologyParameters *c, double a) nogil
+    double tcode_from_auni(CosmologyParameters *c, double a) nogil
+    double tphys_from_auni(CosmologyParameters *c, double a) nogil
+
+    double auni_from_abox(CosmologyParameters *c, double v) nogil
+    double auni_from_tcode(CosmologyParameters *c, double v) nogil
+    double auni_from_tphys(CosmologyParameters *c, double v) nogil
+
+    double abox_from_tcode(CosmologyParameters *c, double tcode) nogil
+    double tcode_from_abox(CosmologyParameters *c, double abox) nogil
+
+    double tphys_from_abox(CosmologyParameters *c, double abox) nogil
+    double tphys_from_tcode(CosmologyParameters *c, double tcode) nogil
+
 cdef extern from "artio.h":
     ctypedef struct artio_fileset_handle "artio_fileset" :
         pass
@@ -138,6 +165,9 @@
     cdef public object parameters 
     cdef artio_fileset_handle *handle
 
+    # cosmology parameter for time unit conversion
+    cdef CosmologyParameters *cosmology
+
     # common attributes
     cdef public int num_grid
     cdef int64_t num_root_cells
@@ -178,6 +208,15 @@
         self.sfc_min = 0
         self.sfc_max = self.num_root_cells-1
 
+        # initialize cosmology module
+        self.cosmology = cosmology_allocate()
+        cosmology_set_OmegaM(self.cosmology, self.parameters['OmegaM'][0])
+        cosmology_set_OmegaL(self.cosmology, self.parameters['OmegaL'][0])
+        cosmology_set_OmegaB(self.cosmology, self.parameters['OmegaB'][0])
+        cosmology_set_h(self.cosmology, self.parameters['hubble'][0])
+        cosmology_set_DeltaDC(self.cosmology, self.parameters['DeltaDC'][0])
+        cosmology_set_fixed(self.cosmology)
+
         # grid detection
         self.min_level = 0
         self.max_level = self.parameters['grid_max_level'][0]
@@ -239,6 +278,8 @@
         if self.primary_variables : free(self.primary_variables)
         if self.secondary_variables : free(self.secondary_variables)
 
+        if self.cosmology : cosmology_free(self.cosmology)
+
         if self.handle : artio_fileset_close(self.handle)
   
     def read_parameters(self) :
@@ -288,6 +329,60 @@
 
             self.parameters[key] = parameter
 
+    def abox_from_auni(self, np.float64_t a):
+        return abox_from_auni(self.cosmology, a)
+
+    def tcode_from_auni(self, np.float64_t a):
+        return tcode_from_auni(self.cosmology, a)
+
+    def tphys_from_auni(self, np.float64_t a):
+        return tphys_from_auni(self.cosmology, a)
+
+    def auni_from_abox(self, np.float64_t v):
+        return auni_from_abox(self.cosmology, v)
+
+    def auni_from_tcode(self, np.float64_t v):
+        return auni_from_tcode(self.cosmology, v)
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def auni_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, nauni
+        cdef np.ndarray[np.float64_t, ndim=1] auni
+        auni = np.empty_like(tcode)
+        nauni = auni.shape[0]
+        with nogil:
+            for i in range(nauni):
+                auni[i] = auni_from_tcode(self.cosmology, tcode[i])
+        return auni
+
+    def auni_from_tphys(self, np.float64_t v):
+        return auni_from_tphys(self.cosmology, v)
+
+    def abox_from_tcode(self, np.float64_t abox):
+        return abox_from_tcode(self.cosmology, abox)
+
+    def tcode_from_abox(self, np.float64_t abox):
+        return tcode_from_abox(self.cosmology, abox)
+
+    def tphys_from_abox(self, np.float64_t abox):
+        return tphys_from_abox(self.cosmology, abox)
+
+    def tphys_from_tcode(self, np.float64_t tcode):
+        return tphys_from_tcode(self.cosmology, tcode)
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def tphys_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, ntphys
+        cdef np.ndarray[np.float64_t, ndim=1] tphys
+        tphys = np.empty_like(tcode)
+        ntphys = tcode.shape[0]
+        with nogil:
+            for i in range(ntphys):
+                tphys[i] = tphys_from_tcode(self.cosmology, tcode[i])
+        return tphys
+
 #    @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

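For reference, a minimal usage sketch of the new fileset conversion methods added above (the dataset name is a placeholder, and _handle is assumed to be the artio_fileset wrapper these methods live on):

import numpy as np
import yt

# Placeholder ARTIO dataset name; any ARTIO fileset would do.
ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011.art")
handle = ds._handle   # the artio_fileset instance from _artio_caller.pyx

# scalar conversions: code time -> physical time / expansion factor
tl = ds.artio_parameters["tl"][0]
print(handle.tphys_from_tcode(tl))    # physical time in years
print(handle.auni_from_tcode(tl))     # global expansion factor

# vectorized conversion; the inner loop runs with the GIL released
tcode = np.array([tl], dtype="float64")
print(handle.auni_from_tcode_array(tcode))
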
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r fdfdb5476aadcfaceaa6f51e35ff143606124191 yt/frontends/artio/artio_headers/cosmology.c
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -0,0 +1,524 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+
+#ifndef ERROR
+#include <stdio.h>
+#define ERROR(msg) { fprintf(stderr,"%s\n",msg); exit(1); }
+#endif
+
+#ifndef ASSERT
+#include <stdio.h>
+#define ASSERT(exp) { if(!(exp)) { fprintf(stderr,"Failed assertion %s, line: %d\n",#exp,__LINE__); exit(1); } }
+#endif
+
+#ifndef HEAPALLOC
+#include <stdlib.h>
+#define HEAPALLOC(type,size)	(type *)malloc((size)*sizeof(type))
+#endif
+
+#ifndef NEWARR
+#include <stdlib.h>
+#define NEWARR(size)   HEAPALLOC(double,size)
+#endif
+
+#ifndef DELETE
+#include <stdlib.h>
+#define DELETE(ptr)    free(ptr)
+#endif
+
+
+#include "cosmology.h"
+
+
+struct CosmologyParametersStruct
+{
+  int set;
+  int ndex;
+  int size;
+  double *la;
+  double *aUni;
+  double *aBox;
+  double *tCode;
+  double *tPhys;
+  double *dPlus;
+  double *qPlus;
+  double aLow;
+  double tCodeOffset;
+
+  double OmegaM;
+  double OmegaD;
+  double OmegaB;
+  double OmegaL;
+  double OmegaK;
+  double OmegaR;
+  double h;
+  double DeltaDC;
+  int flat;
+  double Omh2;
+  double Obh2;
+};
+
+void cosmology_clear_table(CosmologyParameters *c);
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax);
+void cosmology_fill_table_abox(CosmologyParameters *c, int istart, int n);
+
+CosmologyParameters *cosmology_allocate() {
+	CosmologyParameters *c = HEAPALLOC(CosmologyParameters,1);
+	if ( c != NULL ) {
+		memset(c, 0, sizeof(CosmologyParameters));
+
+		c->ndex = 200;
+		c->aLow = 1.0e-2;
+	}
+	return c;
+}
+
+void cosmology_free(CosmologyParameters *c) {
+	cosmology_clear_table(c);
+	DELETE(c);
+}
+
+int cosmology_is_set(CosmologyParameters *c)
+{
+  return (c->OmegaM>0.0 && c->OmegaB>0.0 && c->h>0.0);
+}
+
+
+void cosmology_fail_on_reset(const char *name, double old_value, double new_value)
+{
+  char str[150];
+  sprintf(str,"Trying to change %s from %lg to %lg...\nCosmology has been fixed and cannot be changed.\n",name,old_value,new_value);
+  ERROR(str);
+}
+
+
+void cosmology_set_OmegaM(CosmologyParameters *c, double v)
+{
+  if(v < 1.0e-3) v = 1.0e-3;
+  if(fabs(c->OmegaM-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaM",c->OmegaM,v);
+      c->OmegaM = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaL(CosmologyParameters *c, double v)
+{
+  if(fabs(c->OmegaL-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaL",c->OmegaL,v);
+      c->OmegaL = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaB(CosmologyParameters *c, double v)
+{
+  if(v < 0.0) v = 0.0;
+  if(fabs(c->OmegaB-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaB",c->OmegaB,v);
+      c->OmegaB = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_h(CosmologyParameters *c, double v)
+{
+  if(fabs(c->h-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("h",c->h,v);
+      c->h = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_DeltaDC(CosmologyParameters *c, double v)
+{
+  if(fabs(c->DeltaDC-v) > 1.0e-3)
+    {
+      if(c->set) cosmology_fail_on_reset("DeltaDC",c->DeltaDC,v);
+      c->DeltaDC = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_init(CosmologyParameters *c)
+{
+  if(c->size == 0) /* reset only if the state is dirty */
+    {
+      if(!cosmology_is_set(c)) ERROR("Not all of the required cosmological parameters have been set; the minimum required set is (OmegaM,OmegaB,h).");
+
+      if(c->OmegaB > c->OmegaM) c->OmegaB = c->OmegaM;
+      c->OmegaD = c->OmegaM - c->OmegaB;
+      if(c->flat)
+	{
+	  c->OmegaK = 0.0;
+	  c->OmegaL = 1.0 - c->OmegaM;
+	}
+      else
+	{
+	  c->OmegaK = 1.0 - (c->OmegaM+c->OmegaL);
+	}
+      c->OmegaR = 4.166e-5/(c->h*c->h);
+
+      c->Omh2 = c->OmegaM*c->h*c->h;
+      c->Obh2 = c->OmegaB*c->h*c->h;
+
+      cosmology_fill_table(c,c->aLow,1.0);
+
+      c->tCodeOffset = 0.0;  /*  Do need to set it to zero first */
+#ifndef NATIVE_TCODE_NORMALIZATION
+      c->tCodeOffset = 0.0 - tCode(c,inv_aBox(c,1.0));
+#endif
+    }      
+}
+
+
+void cosmology_set_fixed(CosmologyParameters *c)
+{
+  cosmology_init(c);
+  c->set = 1;
+}
+
+
+double cosmology_mu(CosmologyParameters *c, double a)
+{
+  return sqrt(((a*a*c->OmegaL+c->OmegaK)*a+c->OmegaM)*a+c->OmegaR);
+}
+
+
+double cosmology_dc_factor(CosmologyParameters *c, double dPlus)
+{
+  double dc = 1.0 + dPlus*c->DeltaDC;
+  return 1.0/pow((dc>0.001)?dc:0.001,1.0/3.0);
+}
+
+
+void cosmology_fill_table_integrate(CosmologyParameters *c, double a, double y[], double f[])
+{
+  double mu = cosmology_mu(c, a);
+  double abox = a*cosmology_dc_factor(c, y[2]);
+  
+  f[0] = a/(abox*abox*mu);
+  f[1] = a/mu;
+  f[2] = y[3]/(a*mu);
+  f[3] = 1.5*c->OmegaM*y[2]/mu;
+}
+
+
+void cosmology_fill_table_piece(CosmologyParameters *c, int istart, int n)
+{
+  int i, j;
+  double tPhysUnit = (3.0856775813e17/(365.25*86400))/c->h;  /* 1/H0 in Julian years */
+
+  double x, aeq = c->OmegaR/c->OmegaM;
+  double tCodeFac = 1.0/sqrt(aeq);
+  double tPhysFac = tPhysUnit*aeq*sqrt(aeq)/sqrt(c->OmegaM);
+
+  double da, a0, y0[4], y1[4];
+  double f1[4], f2[4], f3[4], f4[4];
+
+  for(i=istart; i<n; i++)
+    {
+      c->aUni[i] = pow(10.0,c->la[i]);
+    }
+
+  /*
+  //  Small a regime, use analytical formulae for matter + radiation model
+  */  
+  for(i=istart; c->aUni[i]<(c->aLow+1.0e-9) && i<n; i++)
+    {
+      x = c->aUni[i]/aeq;
+
+      c->tPhys[i] = tPhysFac*2*x*x*(2+sqrt(x+1))/(3*pow(1+sqrt(x+1),2.0));
+      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* the long last term is the decaying mode generated after equality; it is very small for x > 10, kept just for completeness */
+      c->qPlus[i] = c->aUni[i]*cosmology_mu(c,c->aUni[i])*(1 + ((2+6*x)/(x*sqrt(1+x))+3*log(x)-6*log(1+sqrt(1+x)))/(log(64)-9)); /* this is a^2*dDPlus/dt/H0 */
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+      c->tCode[i] = 1.0 - tCodeFac*asinh(sqrt(aeq/c->aBox[i]));
+    }
+  
+  /*
+  //  Large a regime, solve ODEs
+  */
+  ASSERT(i > 0);
+
+  tCodeFac = 0.5*sqrt(c->OmegaM);
+  tPhysFac = tPhysUnit;
+
+  y1[0] = c->tCode[i-1]/tCodeFac;
+  y1[1] = c->tPhys[i-1]/tPhysFac;
+  y1[2] = c->dPlus[i-1];
+  y1[3] = c->qPlus[i-1];
+
+  for(; i<n; i++)
+    {
+      a0 = c->aUni[i-1];
+      da = c->aUni[i] - a0;
+
+      /*  RK4 integration */
+      for(j=0; j<4; j++) y0[j] = y1[j];
+      cosmology_fill_table_integrate(c, a0,y1,f1);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f1[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f2);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f2[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f3);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*f3[j];
+      cosmology_fill_table_integrate(c, a0+da,y1,f4);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*(f1[j]+2*f2[j]+2*f3[j]+f4[j])/6.0;
+
+      c->tCode[i] = tCodeFac*y1[0];
+      c->tPhys[i] = tPhysFac*y1[1];
+      c->dPlus[i] = y1[2];
+      c->qPlus[i] = y1[3];
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+    }
+} 
+
+
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax)
+{
+  int i, imin, imax, iold;
+  double dla = 1.0/c->ndex;
+  double lamin, lamax;
+  double *old_la = c->la;
+  double *old_aUni = c->aUni;
+  double *old_aBox = c->aBox;
+  double *old_tCode = c->tCode;
+  double *old_tPhys = c->tPhys;
+  double *old_dPlus = c->dPlus;
+  double *old_qPlus = c->qPlus;
+  int old_size = c->size;
+
+  if(amin > c->aLow) amin = c->aLow;
+  lamin = dla*floor(c->ndex*log10(amin));
+  lamax = dla*ceil(c->ndex*log10(amax));
+
+  c->size = 1 + (int)(0.5+c->ndex*(lamax-lamin)); 
+  ASSERT(fabs(lamax-lamin-dla*(c->size-1)) < 1.0e-14);
+
+  c->la = NEWARR(c->size);     ASSERT(c->la != NULL);
+  c->aUni = NEWARR(c->size);   ASSERT(c->aUni != NULL);
+  c->aBox = NEWARR(c->size);   ASSERT(c->aBox != NULL);
+  c->tCode = NEWARR(c->size);  ASSERT(c->tCode != NULL);
+  c->tPhys = NEWARR(c->size);  ASSERT(c->tPhys != NULL);
+  c->dPlus = NEWARR(c->size);  ASSERT(c->dPlus != NULL);
+  c->qPlus = NEWARR(c->size);  ASSERT(c->qPlus != NULL);
+
+  /*
+  //  New log10(aUni) table
+  */
+  for(i=0; i<c->size; i++)
+    {
+      c->la[i] = lamin + dla*i;
+    }
+
+  if(old_size == 0)
+    {
+      /*
+      //  Filling the table for the first time
+      */
+      cosmology_fill_table_piece(c,0,c->size);
+    }
+  else
+    {
+      /*
+      //  Find if we need to expand the lower end
+      */
+      if(lamin < old_la[0])
+	{
+	  imin = (int)(0.5+c->ndex*(old_la[0]-lamin));
+	  ASSERT(fabs(old_la[0]-lamin-dla*imin) < 1.0e-14);
+	}
+      else imin = 0;
+
+      /*
+      //  Find if we need to expand the upper end
+      */
+      if(lamax > old_la[old_size-1])
+	{
+	  imax = (int)(0.5+c->ndex*(old_la[old_size-1]-lamin));
+	  ASSERT(fabs(old_la[old_size-1]-lamin-dla*imax) < 1.0e-14);
+	}
+      else imax = c->size - 1;
+  
+      /*
+      //  Re-use the rest
+      */
+      if(lamin > old_la[0])
+	{
+	  iold = (int)(0.5+c->ndex*(lamin-old_la[0]));
+	  ASSERT(fabs(lamin-old_la[0]-dla*iold) < 1.0e-14);
+	}
+      else iold = 0;
+
+      memcpy(c->aUni+imin,old_aUni+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->aBox+imin,old_aBox+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tCode+imin,old_tCode+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tPhys+imin,old_tPhys+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->dPlus+imin,old_dPlus+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->qPlus+imin,old_qPlus+iold,sizeof(double)*(imax-imin+1));
+
+      DELETE(old_la);
+      DELETE(old_aUni);
+      DELETE(old_aBox);
+      DELETE(old_tCode);
+      DELETE(old_tPhys);
+      DELETE(old_dPlus);
+      DELETE(old_qPlus);
+
+      /*
+      //  Fill in additional pieces
+      */
+      if(imin > 0) cosmology_fill_table_piece(c,0,imin);
+      if(imax < c->size-1) cosmology_fill_table_piece(c,imax,c->size);
+    }
+}
+
+
+void cosmology_clear_table(CosmologyParameters *c)
+{
+  if(c->size > 0)
+    {
+      DELETE(c->la);
+      DELETE(c->aUni);
+      DELETE(c->aBox);
+      DELETE(c->tCode);
+      DELETE(c->tPhys);
+      DELETE(c->dPlus);
+      DELETE(c->qPlus);
+
+      c->size = 0;
+      c->la = NULL;
+      c->aUni = NULL;
+      c->aBox = NULL;
+      c->tCode = NULL;
+      c->tPhys = NULL;
+      c->dPlus = NULL;
+      c->qPlus = NULL;
+    }
+}
+
+
+void cosmology_check_range(CosmologyParameters *c, double a)
+{
+  ASSERT((a > 1.0e-9) && (a < 1.0e9));
+
+  if(c->size == 0) cosmology_init(c);
+
+  if(a < c->aUni[0])
+    {
+      cosmology_fill_table(c,a,c->aUni[c->size-1]);
+    }
+
+  if(a > c->aUni[c->size-1])
+    {
+      cosmology_fill_table(c,c->aUni[0],a);
+    }
+}
+
+
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax)
+{
+  cosmology_check_range(c, amin);
+  cosmology_check_range(c, amax);
+}
+
+
+double cosmology_get_value_from_table(CosmologyParameters *c, double a, double table[])
+{
+  int idx = (int)(c->ndex*(log10(a)-c->la[0]));
+
+  ASSERT(idx>=0 && idx<c->size);
+
+  /*
+  //  Do it as a function of aUni rather than la to ensure exact inversion
+  */
+  return table[idx] + (table[idx+1]-table[idx])/(c->aUni[idx+1]-c->aUni[idx])*(a-c->aUni[idx]);
+}
+
+
+int cosmology_find_index(CosmologyParameters *c, double v, double table[])
+{
+  int ic, il = 0;
+  int ih = c->size - 1;
+
+  if(v < table[0])
+    {
+      return -1;
+    }
+  if(v > table[c->size-1])
+    {
+      return c->size + 1;
+    }
+
+  while((ih-il) > 1)
+    {
+      ic = (il+ih)/2;
+      if(v > table[ic]) /* special, not fully optimal form to avoid checking that il < c->size-1 */
+	il = ic;
+      else
+	ih = ic;
+    }
+
+  ASSERT(il+1 < c->size);
+
+  return il;
+}
+
+
+/*
+//  Direct and inverse functions
+*/
+#define DEFINE_FUN(name,offset)			\
+double name(CosmologyParameters *c, double a) \
+{ \
+  cosmology_check_range(c,a); \
+  return cosmology_get_value_from_table(c,a,c->name) + offset; \
+} \
+double inv_##name(CosmologyParameters *c, double v) \
+{ \
+  int idx; \
+  double *table; \
+  if(c->size == 0) cosmology_init(c); \
+  v -= offset; \
+  table = c->name; \
+  idx = cosmology_find_index(c,v,table); \
+  while(idx < 0) \
+    { \
+      cosmology_check_range(c,0.5*c->aUni[0]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  while(idx > c->size) \
+    { \
+      cosmology_check_range(c,2.0*c->aUni[c->size-1]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  return c->aUni[idx] + (c->aUni[idx+1]-c->aUni[idx])/(table[idx+1]-table[idx])*(v-table[idx]); \
+}
+
+DEFINE_FUN(aBox,0.0);
+DEFINE_FUN(tCode,c->tCodeOffset);
+DEFINE_FUN(tPhys,0.0);
+DEFINE_FUN(dPlus,0.0);
+DEFINE_FUN(qPlus,0.0);
+
+#undef DEFINE_FUN

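The integration loop in cosmology_fill_table_piece() above is a standard fourth-order Runge-Kutta scheme in the scale factor. A minimal Python sketch of the same update, where deriv is a hypothetical stand-in for cosmology_fill_table_integrate() returning the four derivatives with respect to aUni:

import numpy as np

def rk4_step(deriv, a0, y0, da):
    # y0 holds (tCode, tPhys, dPlus, qPlus); deriv(a, y) returns dy/da
    f1 = deriv(a0, y0)
    f2 = deriv(a0 + 0.5 * da, y0 + 0.5 * da * f1)
    f3 = deriv(a0 + 0.5 * da, y0 + 0.5 * da * f2)
    f4 = deriv(a0 + da, y0 + da * f3)
    return y0 + da * (f1 + 2.0 * f2 + 2.0 * f3 + f4) / 6.0

# toy check against dy/da = y, whose exact solution is exp(a)
y = rk4_step(lambda a, y: y, 0.0, np.array([1.0]), 0.1)
print(y)   # ~1.10517
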
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r fdfdb5476aadcfaceaa6f51e35ff143606124191 yt/frontends/artio/artio_headers/cosmology.h
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.h
@@ -0,0 +1,101 @@
+#ifndef __COSMOLOGY_H__
+#define __COSMOLOGY_H__
+
+typedef struct CosmologyParametersStruct CosmologyParameters;
+
+#define COSMOLOGY_DECLARE_PRIMARY_PARAMETER(name) \
+void cosmology_set_##name(CosmologyParameters *c, double value)
+
+#define cosmology_set(c,name,value)	\
+cosmology_set_##name(c,value)
+
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaM);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaB);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaL);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(h);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(DeltaDC);
+
+#undef COSMOLOGY_DECLARE_PRIMARY_PARAMETER
+
+CosmologyParameters *cosmology_allocate();
+void cosmology_free(CosmologyParameters *c);
+
+/*
+//  Check that all required cosmological parameters have been set.
+//  The minimum set is OmegaM, OmegaB, and h. By default, OmegaL,
+//  OmegaK, and the DC mode are assumed to be zero.
+*/
+int cosmology_is_set(CosmologyParameters *c);
+
+
+/*
+//  Freeze the cosmology and forbid any further changes to it.
+//  In codes that include user-customizable segments (like plugins),
+//  this function can be used for ensuring that a user does not
+//  change the cosmology in mid-run.
+*/
+void cosmology_set_fixed(CosmologyParameters *c);
+
+
+/*
+//  Manual initialization. This does not need to be called;
+//  the initialization is done automatically on the first call
+//  to a relevant function.
+*/
+void cosmology_init(CosmologyParameters *c);
+
+
+/*
+//  Set the range of global scale factors within which calls to the
+//  direct functions remain thread-safe.
+*/
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax);
+
+/*
+//  Direct functions take the global cosmological scale factor as the argument.
+//  These functions are thread-safe if called with the argument in the
+//  range set by a prior call to cosmology_set_thread_safe_range(...).
+//  Calling them with the argument outside that range is ok, but breaks
+//  thread-safety assurance.
+*/
+
+#define DEFINE_FUN(name)         \
+double name(CosmologyParameters *c, double a); \
+double inv_##name(CosmologyParameters *c, double v);
+
+DEFINE_FUN(aBox);
+DEFINE_FUN(tCode);
+DEFINE_FUN(tPhys);
+DEFINE_FUN(dPlus);
+DEFINE_FUN(qPlus); /* Q+ = a^2 dD+/(H0 dt) */
+
+#undef DEFINE_FUN
+
+/*
+//  Conversion macros
+*/
+#define  abox_from_auni(c,a)   aBox(c,a)
+#define tcode_from_auni(c,a)  tCode(c,a)
+#define tphys_from_auni(c,a)  tPhys(c,a)
+#define dplus_from_auni(c,a)  dPlus(c,a)
+
+#define auni_from_abox(c,v)   inv_aBox(c,v)
+#define auni_from_tcode(c,v)  inv_tCode(c,v)
+#define auni_from_tphys(c,v)  inv_tPhys(c,v)
+#define auni_from_dplus(c,v)  inv_dPlus(c,v)
+
+#define abox_from_tcode(c,tcode)   aBox(c,inv_tCode(c,tcode))
+#define tcode_from_abox(c,abox)    tCode(c,inv_aBox(c,abox))
+
+#define tphys_from_abox(c,abox)    tPhys(c,inv_aBox(c,abox))
+#define tphys_from_tcode(c,tcode)  tPhys(c,inv_tCode(c,tcode))
+#define dplus_from_tcode(c,tcode)  dPlus(c,inv_tCode(c,tcode))
+
+/*
+//  Hubble parameter in km/s/Mpc; defined as a macro so that it can be
+//  undefined if needed to avoid a name clash.
+*/
+double cosmology_mu(CosmologyParameters *c, double a);
+#define Hubble(c,a) (100*c->h*cosmology_mu(c,a)/(a*a))
+
+#endif /* __COSMOLOGY_H__ */

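The Hubble(c,a) macro above evaluates H(a) = 100 h mu(a) / a^2 in km/s/Mpc. A minimal Python sketch of the same expression (parameter values are illustrative, not taken from any dataset):

import math

def cosmology_mu(a, OmegaM, OmegaL, OmegaK=0.0, OmegaR=0.0):
    # mirrors cosmology_mu() in cosmology.c
    return math.sqrt(((a * a * OmegaL + OmegaK) * a + OmegaM) * a + OmegaR)

def hubble(a, h, OmegaM, OmegaL, OmegaK=0.0, OmegaR=0.0):
    # mirrors the Hubble(c,a) macro: km/s/Mpc
    return 100.0 * h * cosmology_mu(a, OmegaM, OmegaL, OmegaK, OmegaR) / (a * a)

print(hubble(1.0, 0.7, 0.3, 0.7))   # ~70 km/s/Mpc for a flat example cosmology
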
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r fdfdb5476aadcfaceaa6f51e35ff143606124191 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -26,8 +26,7 @@
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
-    ARTIOFieldInfo, \
-    b2t
+    ARTIOFieldInfo
 from yt.fields.particle_fields import \
     standard_particle_fields
 
@@ -387,37 +386,40 @@
             self.particle_types = ()
         self.particle_types_raw = self.particle_types
 
-        self.current_time = b2t(self.artio_parameters["tl"][0])
+        self.current_time = self.quan(self._handle.tphys_from_tcode(self.artio_parameters["tl"][0]),"yr")
 
         # detect cosmology
         if "abox" in self.artio_parameters:
+            self.cosmological_simulation = True
+
             abox = self.artio_parameters["abox"][0]
-            self.cosmological_simulation = True
             self.omega_lambda = self.artio_parameters["OmegaL"][0]
             self.omega_matter = self.artio_parameters["OmegaM"][0]
             self.hubble_constant = self.artio_parameters["hubble"][0]
-            self.current_redshift = 1.0/self.artio_parameters["abox"][0] - 1.0
+            self.current_redshift = 1.0/self.artio_parameters["auni"][0] - 1.0
+            self.current_redshift_box = 1.0/abox - 1.0
 
             self.parameters["initial_redshift"] =\
                 1.0 / self.artio_parameters["auni_init"][0] - 1.0
             self.parameters["CosmologyInitialRedshift"] =\
                 self.parameters["initial_redshift"]
-        else:
-            self.cosmological_simulation = False
 
-        #units
-        if self.cosmological_simulation:
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
             self.parameters['unit_t'] =\
                 self.artio_parameters["time_unit"][0] * abox**2
             self.parameters['unit_l'] =\
                 self.artio_parameters["length_unit"][0] * abox
+
+            if self.artio_parameters["DeltaDC"][0] != 0:
+                mylog.warn("DeltaDC != 0, which implies auni != abox.  Be sure you understand which expansion parameter is appropriate for your use! (Gnedin, Kravtsov, & Rudd 2011)")
         else:
+            self.cosmological_simulation = False
+
             self.parameters['unit_l'] = self.artio_parameters["length_unit"][0]
             self.parameters['unit_t'] = self.artio_parameters["time_unit"][0]
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
 
-        # hard coded assumption of 3D periodicity (add to dataset)
+        # hard coded assumption of 3D periodicity
         self.periodicity = (True, True, True)
 
     @classmethod

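To make the distinction in the hunk above concrete: when DeltaDC != 0 the global expansion factor auni and the box expansion factor abox differ, and the dataset now records a redshift for each. A minimal sketch with illustrative numbers (the real values come from artio_parameters):

# Illustrative values only; in the frontend these are
# artio_parameters["auni"][0], ["abox"][0], ["time_unit"][0], ["length_unit"][0].
auni, abox = 0.9011, 0.9020
time_unit, length_unit = 3.0e17, 1.0e25

current_redshift = 1.0 / auni - 1.0        # from the global expansion factor
current_redshift_box = 1.0 / abox - 1.0    # from the box expansion factor

# code units are rescaled by the box expansion factor, as in the diff
unit_t = time_unit * abox**2
unit_l = length_unit * abox
print(current_redshift, current_redshift_box)
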
diff -r 77bbb0c5a38e27dd1855f23710a631765ca31f05 -r fdfdb5476aadcfaceaa6f51e35ff143606124191 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -19,6 +19,8 @@
 from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from yt.fields.field_detector import \
+    FieldDetector
 from yt.units.yt_array import \
     YTArray
 
@@ -62,7 +64,8 @@
         ("MASS", ("code_mass", ["particle_mass"], None)),
         ("PID", ("", ["particle_index"], None)),
         ("SPECIES", ("", ["particle_type"], None)),
-        ("BIRTH_TIME", ("code_time", ["creation_time"], None)),
+        ("BIRTH_TIME", ("", [], None)),  # code-units defined as dimensionless to 
+                                         # avoid incorrect conversion 
         ("INITIAL_MASS", ("code_mass", ["initial_mass"], None)),
         ("METALLICITY_SNIa", ("", ["metallicity_snia"], None)),
         ("METALLICITY_SNII", ("", ["metallicity_snii"], None)),
@@ -111,123 +114,31 @@
                        take_log=True)
 
     def setup_particle_fields(self, ptype):
+        def _creation_time(field,data):
+            if isinstance(data,FieldDetector):
+                return data["BIRTH_TIME"]
+            if any(data["BIRTH_TIME"] > 0.0):
+                raise ValueError("Invalid value for BIRTH_TIME found in ARTIO frontend")
+            return YTArray(data.ds._handle.tphys_from_tcode_array(data["BIRTH_TIME"]),"yr")
+        
 
-        def _particle_age(field, data):
-            return b2t(data[ptype,"creation_time"])
-        self.add_field((ptype, "particle_age"), function=_particle_age, units="s",
-                  particle_type=True)
+        def _creation_redshift(field,data):
+            if isinstance(data,FieldDetector):
+                return data["BIRTH_TIME"]
+            if any(data["BIRTH_TIME"] > 0.0):
+                raise ValueError("Invalid value for BIRTH_TIME found in ARTIO frontend")
+            return 1.0/data.ds._handle.auni_from_tcode_array(data["BIRTH_TIME"]) - 1.0
+
+        def _age(field, data):
+            if isinstance(data,FieldDetector):
+                return data["BIRTH_TIME"]
+            return data.ds.current_time - data["creation_time"]
+
+        self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
+                    particle_type=True)
+        self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
+                    particle_type=True)
+        self.add_field((ptype, "age"), function=_age, units="yr",
+                    particle_type=True)
 
         super(ARTIOFieldInfo, self).setup_particle_fields(ptype)
-
-#stolen from frontends/art/
-#All of these functions are to convert from hydro time var to
-#proper time
-sqrt = np.sqrt
-sign = np.sign
-
-
-def find_root(f, a, b, tol=1e-6):
-    c = (a+b)/2.0
-    last = -np.inf
-    assert(sign(f(a)) != sign(f(b)))
-    while np.abs(f(c)-last) > tol:
-        last = f(c)
-        if sign(last) == sign(f(b)):
-            b = c
-        else:
-            a = c
-        c = (a+b)/2.0
-    return c
-
-
-def quad(fintegrand, xmin, xmax, n=1e4):
-    spacings = np.logspace(np.log10(xmin), np.log10(xmax), n)
-    integrand_arr = fintegrand(spacings)
-    val = np.trapz(integrand_arr, dx=np.diff(spacings))
-    return val
-
-
-def a2b(at, Om0=0.27, Oml0=0.73, h=0.700):
-    def f_a2b(x):
-        val = 0.5*sqrt(Om0) / x**3.0
-        val /= sqrt(Om0/x**3.0 + Oml0 + (1.0-Om0-Oml0)/x**2.0)
-        return val
-    #val, err = si.quad(f_a2b,1,at)
-    val = quad(f_a2b, 1, at)
-    return val
-
-
-def b2a(bt, **kwargs):
-    #converts code time into expansion factor
-    #if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2
-    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
-    f_b2a = lambda at: a2b(at, **kwargs)-bt
-    return find_root(f_b2a, 1e-4, 1.1)
-    #return so.brenth(f_b2a,1e-4,1.1)
-    #return brent.brent(f_b2a)
-
-
-def a2t(at, Om0=0.27, Oml0=0.73, h=0.700):
-    integrand = lambda x: 1./(x*sqrt(Oml0+Om0*x**-3.0))
-    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
-    current_time = quad(integrand, 1e-4, at)
-    #spacings = np.logspace(-5,np.log10(at),1e5)
-    #integrand_arr = integrand(spacings)
-    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-    current_time *= 9.779/h
-    return current_time
-
-
-def b2t(tb, n=1e2, logger=None, **kwargs):
-    tb = np.array(tb)
-    if len(np.atleast_1d(tb)) == 1: 
-        return a2t(b2a(tb))
-    if tb.shape == ():
-        return None 
-    if len(tb) < n:
-        n = len(tb)
-    age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
-    age_max = a2t(b2a(tb.min(), **kwargs), **kwargs)
-    tbs = -1.*np.logspace(np.log10(-tb.min()),
-                          np.log10(-tb.max()), n)
-    ages = []
-    for i, tbi in enumerate(tbs):
-        ages += a2t(b2a(tbi)),
-        if logger:
-            logger(i)
-    ages = np.array(ages)
-    fb2t = np.interp(tb, tbs, ages)
-    #fb2t = interp1d(tbs,ages)
-    return fb2t*1e9*31556926
-
-
-def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):
-    #stars are formed in lumps; spread out the ages linearly
-    da = np.diff(ages)
-    assert np.all(da <= 0)
-    #ages should always be decreasing, and ordered so
-    agesd = np.zeros(ages.shape)
-    idx, = np.where(da < 0)
-    idx += 1
-    #mark the right edges
-    #spread this age evenly out to the next age
-    lidx = 0
-    lage = 0
-    for i in idx:
-        n = i-lidx
-        #n stars affected
-        rage = ages[i]
-        lage = max(rage-spread, 0.0)
-        agesd[lidx:i] = np.linspace(lage, rage, n)
-        lidx = i
-        #lage=rage
-        if logger:
-            logger(i)
-    #we didn't get the last iter
-    i = ages.shape[0]-1
-    n = i-lidx
-    #n stars affected
-    rage = ages[i]
-    lage = max(rage-spread, 0.0)
-    agesd[lidx:i] = np.linspace(lage, rage, n)
-    return agesd

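A minimal usage sketch for the three particle fields the hunk above adds (the dataset name and the "STAR" particle type are placeholders):

import yt

ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011.art")   # placeholder ARTIO dataset
ad = ds.all_data()

print(ad["STAR", "creation_time"].in_units("Myr"))
print(ad["STAR", "creation_redshift"])
print(ad["STAR", "age"].in_units("Myr"))
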

https://bitbucket.org/yt_analysis/yt/commits/f01557326ebc/
Changeset:   f01557326ebc
Branch:      yt
User:        drudd
Date:        2014-08-28 00:00:27
Summary:     Merged yt_analysis/yt into yt
Affected #:  37 files

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -580,56 +580,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +651,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -932,7 +929,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

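The notebook hunk above only shows the filter functions; a minimal sketch of registering and applying one of them (the dataset path is a placeholder, and yt.add_particle_filter is the documented registration entry point):

import numpy as np
import yt

def young_stars(pfilter, data):
    age = data.ds.current_time - data[pfilter.filtered_type, "creation_time"]
    return np.logical_and(age.in_units("Myr") <= 5, age >= 0)

yt.add_particle_filter("young_stars", function=young_stars,
                       filtered_type="Stars", requires=["creation_time"])

ds = yt.load("TipsyGalaxy/galaxy.00300")   # placeholder dataset path
ds.add_particle_filter("young_stars")
print(ds.all_data()["young_stars", "particle_mass"])
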
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -179,6 +179,38 @@
      fields or that get aliased to themselves, we can specify a different
      desired output unit than the unit found on disk.
 
+Debugging a Derived Field
+-------------------------
+
+If your derived field is not behaving as you would like, you can insert a call
+to ``data._debug()`` to spawn an interactive interpreter whenever that line is
+reached.  Note that this is slightly different from calling
+``pdb.set_trace()``, as it will *only* trigger when the derived field is being
+called on an actual data object, rather than during the field detection phase.
+The starting position will be one function lower in the stack than you are
+likely interested in, but you can either step through back to the derived field
+function, or simply type ``u`` to go up a level in the stack.
+
+For instance, if you had defined this derived field:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And you wanted to debug it, you could do:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       data._debug()
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And now, when that derived field is actually used, you will be placed into a
+debugger.
+
 Units for Cosmological Datasets
 -------------------------------
 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -170,10 +170,16 @@
 Developing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-If you plan to develop yt on Windows, we recommend using the `MinGW
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the
-syntax for the setup command is slightly different; you must type:
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
 
 .. code-block:: bash
 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -138,14 +138,14 @@
 
 .. _loading-orion-data:
 
-Boxlib Data
+BoxLib Data
 -----------
 
-yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
 Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
-To load a Boxlib dataset, you can use the ``yt.load`` command on
+To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
 ``inputs`` file in the base directory, but Maestro and Castro will get
 all the necessary parameter information from the ``job_info`` file in
@@ -178,6 +178,18 @@
 For Maestro and Castro, you would not need the ``inputs`` file, and you 
 would have a ``job_info`` file in the plotfile directory.
 
+.. rubric:: Caveats
+
+* yt does not read the Maestro base state (although you can have Maestro
+  map it to a full Cartesian state variable before writing the plotfile
+  to get around this).  E-mail the dev list if you need this support.
+* yt does not know about particles in Maestro.
+* For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
+  depending on the value of the ``use_tfromp`` runtime parameter.
+* For Maestro, some velocity fields like ``velocity_magnitude`` or 
+  ``mach_number`` will always use the on-disk value, and not have yt 
+  derive it, due to the complex interplay of the base state velocity.
+
 .. _loading-enzo-data:
 
 Enzo Data

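As context for the loading documentation above, a minimal sketch of the described loading pattern (the plotfile directory name is a placeholder):

import yt

# BoxLib plotfiles are loaded by pointing yt.load at the plotfile directory;
# for Orion an "inputs" file is expected in the base directory as well.
ds = yt.load("plt00010")   # placeholder plotfile name
print(ds.field_list)
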
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -198,10 +198,9 @@
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on Microsoft Windows is only supported for Windows XP Service Pack
-3 and higher (both 32-bit and 64-bit) using Anaconda, see
-:ref:`anaconda-installation`.  Also see :ref:`windows-developing` for details on
-how to build yt from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
+:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
+from source in Windows.
 
 .. _source-installation:
 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -853,9 +853,9 @@
               raise NeedsParameter("omega_baryon")
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-          # mean density(z) ~ omega_matter * (1 + z)^3
+          # mean matter density(z) ~ omega_matter * (1 + z)^3
           return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'cell_mass')
@@ -1526,9 +1526,9 @@
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
           # mean density(z) ~ omega_matter * (1 + z)^3
-          return data[ftype, "density"] / data.ds.omega_matter / \
+          return data[ftype, "matter_density"] / data.ds.omega_matter / \
             co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'mean_molecular_weight')
@@ -1747,7 +1747,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radial_velocity_absolute')
@@ -1766,7 +1770,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radiation_acceleration_x')
@@ -2702,12 +2710,8 @@
 .. code-block:: python
 
       def _cylindrical_r(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
   
 
@@ -2721,12 +2725,8 @@
 .. code-block:: python
 
       def _cylindrical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_cyl_theta(coords, normal)
   
 
@@ -2741,13 +2741,9 @@
 .. code-block:: python
 
       def _cylindrical_z(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_cyl_z(coords, normal).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
   
 
 ('index', 'disk_angle')
@@ -2903,12 +2899,8 @@
 .. code-block:: python
 
       def _spherical_phi(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_phi(coords, normal)
   
 
@@ -2923,12 +2915,8 @@
 .. code-block:: python
 
       def _spherical_r(field, data):
-          center = data.get_field_parameter("center")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_sph_r(coords).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
   
 
 ('index', 'spherical_theta')
@@ -2941,12 +2929,8 @@
 .. code-block:: python
 
       def _spherical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_theta(coords, normal)
   
 
@@ -4025,6 +4009,603 @@
    * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
    * Particle Type: True
 
+.. _Castro_specific_fields:
+
+Castro-Specific Fields
+----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'rho_E')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'rho_e')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'grav_x')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_y')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_z')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'pressure')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'kineng')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'magvort')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'divu')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'eint_E')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'eint_e')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'magmom')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'maggrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'phiGrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+.. _Maestro_specific_fields:
+
+Maestro-Specific Fields
+-----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'x_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radial_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'tfromp')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'tfromh')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'S')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'ad_excess')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltaT')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltagamma')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltap')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'divw0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'entropypert')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'enucdot')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{g} \cdot \rm{s}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_x')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_y')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_z')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'h')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'h0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'momentum')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'p0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'p0pluspi')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pi')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pioverp0')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'rho0')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhoh')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``enthalpy_density``
+   * Particle Type: False
+
+('boxlib', 'rhoh0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhohpert')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhopert')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'sponge')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'tpert')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'vort')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'w0_x')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_y')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_z')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+.. _Orion_specific_fields:
+
+Orion-Specific Fields
+---------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'eden')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length} \cdot \rm{code}~\rm{time}^{2}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'temperature')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'xvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'yvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'zvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('io', 'particle_mass')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass}}`
+   * Particle Type: True
+
+('io', 'particle_position_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_id')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``particle_index``
+   * Particle Type: True
+
+('io', 'particle_mdot')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
 .. _Enzo_specific_fields:
 
 Enzo-Specific Fields

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:f3e6416e4807e008a016ad8c7dfc8e78cab0d7519498458660554a4c88549c23"
+  "signature": "sha256:5a1547973517987ff047f1b2405277a0e98392e8fd5ffe04521cb2dc372d32d3"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -83,7 +83,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# Build a transfer function that is a multivariate gaussian in Density\n",
+      "# Build a transfer function that is a multivariate gaussian in temperature\n",
       "tfh = yt.TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_log(True)\n",
@@ -180,4 +180,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,8 +1,10 @@
-from scipy import optimize
-import numpy as na
+import numpy as np
 import h5py
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
+from yt.utilities.on_demand_imports import _scipy
+
+optimize = _scipy.optimize
 
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
         minError=1E-4, complexLim=.995,
@@ -83,7 +85,7 @@
     x0,xRes=x[0],x[1]-x[0]
 
     #Empty fit without any lines
-    yFit = na.ones(len(fluxData))
+    yFit = np.ones(len(fluxData))
 
     #Force the first and last flux pixel to be 1 to prevent OOB
     fluxData[0]=1
@@ -98,10 +100,10 @@
     #Fit all species one at a time in given order from low to high wavelength
     for species in orderFits:
         speciesDict = speciesDicts[species]
-        speciesLines = {'N':na.array([]),
-                        'b':na.array([]),
-                        'z':na.array([]),
-                        'group#':na.array([])}
+        speciesLines = {'N':np.array([]),
+                        'b':np.array([]),
+                        'z':np.array([]),
+                        'group#':np.array([])}
 
         #Set up wavelengths for species
         initWl = speciesDict['wavelength'][0]
@@ -131,7 +133,7 @@
                         yFitBounded,z,speciesDict,
                         minSize,minError)
 
-            if na.size(newLinesP)> 0:
+            if np.size(newLinesP)> 0:
 
                 #Check for EXPLOOOOSIIONNNSSS
                 newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
@@ -150,12 +152,12 @@
 
 
             #Add new group to all fitted lines
-            if na.size(newLinesP)>0:
-                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
-                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
-                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
-                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
-                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+            if np.size(newLinesP)>0:
+                speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*np.ones(np.size(newLinesP[:,0]))
+                speciesLines['group#']=np.append(speciesLines['group#'],groupNums)
 
         allSpeciesLines[species]=speciesLines
 
@@ -226,7 +228,7 @@
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
         initP[2]=initz
-        initP=na.array([initP])
+        initP=np.array([initP])
 
     linesP = initP
 
@@ -259,7 +261,7 @@
 
 
         #Set results of optimization
-        linesP = na.reshape(fitP,(-1,3))
+        linesP = np.reshape(fitP,(-1,3))
 
         #Generate difference between current best fit and data
         yNewFit=_gen_flux_lines(x,linesP,speciesDict)
@@ -288,7 +290,7 @@
                 break
 
         #If too many lines 
-        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+        if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x):
             #If its fitable by flag tools and still bad, use flag tools
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
@@ -315,17 +317,17 @@
             newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
-        linesP=na.append(linesP,[newP],axis=0)
+        linesP=np.append(linesP,[newP],axis=0)
 
 
     #Check the parameters of all lines to see if they fall in an
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict,x)
+        check=_check_params(np.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
-    linesP = na.delete(linesP,remove,axis=0)
+    linesP = np.delete(linesP,remove,axis=0)
 
     return linesP,flag
 
@@ -377,7 +379,7 @@
     #Iterate through test line guesses
     for initLines in lineTests:
         if initLines[1,0]==0:
-            initLines = na.delete(initLines,1,axis=0)
+            initLines = np.delete(initLines,1,axis=0)
 
         #Do fitting with initLines as first guess
         linesP,flag=_complex_fit(x,yDat,yFit,initz,
@@ -421,7 +423,7 @@
     """
 
     #Set up a bunch of empty lines
-    testP = na.zeros((10,2,3))
+    testP = np.zeros((10,2,3))
 
     testP[0,0,:]=[1E18,20,initz]
     testP[1,0,:]=[1E18,40,initz]
@@ -542,7 +544,7 @@
                 errBound = 10*errBound*len(yb)
 
             #Generate a fit and find the difference to data
-            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict)
             dif =yb-yFitb
 
 
@@ -557,7 +559,7 @@
                 break
 
     #Remove all bad line fits
-    linesP = na.delete(linesP,removeLines,axis=0)
+    linesP = np.delete(linesP,removeLines,axis=0)
 
     return linesP 
 
@@ -755,7 +757,7 @@
             if firstLine: 
                 break
 
-    flux = na.exp(-y)
+    flux = np.exp(-y)
     return flux
 
 def _gen_tau(t, p, f, Gamma, lambda_unshifted):
@@ -768,7 +770,7 @@
     a=7.95774715459E-15*Gamma*lambda_unshifted/b
     x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
     
-    H = na.zeros(len(x))
+    H = np.zeros(len(x))
     H = voigt(a,x)
     
     tau = tau_o*H
@@ -910,9 +912,9 @@
 
             # Make the final line parameters. Its annoying because
             # one or both regions may have fit to nothing
-            if na.size(p1)> 0 and na.size(p2)>0:
-                p = na.r_[p1,p2]
-            elif na.size(p1) > 0:
+            if np.size(p1)> 0 and np.size(p2)>0:
+                p = np.r_[p1,p2]
+            elif np.size(p1) > 0:
                 p = p1
             else:
                 p = p2
@@ -952,7 +954,7 @@
             # max and min to prevent boundary errors
 
             flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
-            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+            flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
 
             #Find regions that are absorbing outside the region we fit
             flux_dif = 1 - flux
@@ -971,7 +973,7 @@
                 remove_lines.append(i)
     
     if remove_lines:
-        p = na.delete(p, remove_lines, axis=0)
+        p = np.delete(p, remove_lines, axis=0)
 
     return p
 

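The change above moves the SciPy dependency behind yt's on-demand import machinery, so importing the absorption-spectrum fitter no longer fails outright when SciPy is missing. As a rough, stand-alone sketch of that general pattern (not yt's actual ``_scipy`` object, whose implementation is outside this diff), a lazy proxy can defer the import until an attribute is first requested:

.. code-block:: python

    import importlib

    class OnDemandModule(object):
        """Illustrative lazy importer; attribute access loads a submodule."""
        def __init__(self, name):
            self._name = name

        def __getattr__(self, attr):
            # e.g. scipy_on_demand.optimize -> import scipy.optimize on first use
            return importlib.import_module("%s.%s" % (self._name, attr))

    scipy_on_demand = OnDemandModule("scipy")
    optimize = scipy_on_demand.optimize   # SciPy is only imported here
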
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -22,14 +22,10 @@
 except ImportError:
     pass
 
-try:
-    import xspec
-    from scipy.integrate import cumtrapz
-    from scipy import stats        
-except ImportError:
-    pass
-from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.on_demand_imports import _astropy, _scipy
+
 pyfits = _astropy.pyfits
+stats = _scipy.stats
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
@@ -212,11 +208,14 @@
         try:
             self.line_handle = pyfits.open(self.linefile)
         except IOError:
-            mylog.error("LINE file %s does not exist" % (self.linefile))
+            mylog.error("LINE file %s does not exist" % self.linefile)
+            raise IOError("LINE file %s does not exist" % self.linefile)
         try:
             self.coco_handle = pyfits.open(self.cocofile)
         except IOError:
-            mylog.error("COCO file %s does not exist" % (self.cocofile))
+            mylog.error("COCO file %s does not exist" % self.cocofile)
+            raise IOError("COCO file %s does not exist" % self.cocofile)
+
         self.Tvals = self.line_handle[1].data.field("kT")
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
@@ -224,18 +223,18 @@
     
     def _make_spectrum(self, element, tindex):
         
-        tmpspec = np.zeros((self.nchan))
+        tmpspec = np.zeros(self.nchan)
         
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
-        vec = np.zeros((self.nchan))
+        vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.ndarray_view()
         if self.thermal_broad:
-            vec = np.zeros((self.nchan))
+            vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
@@ -270,10 +269,10 @@
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        cspec_l = np.zeros((self.nchan))
-        mspec_l = np.zeros((self.nchan))
-        cspec_r = np.zeros((self.nchan))
-        mspec_r = np.zeros((self.nchan))
+        cspec_l = np.zeros(self.nchan)
+        mspec_l = np.zeros(self.nchan)
+        cspec_r = np.zeros(self.nchan)
+        mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
             return cspec_l*cm3/units.s, mspec_l*cm3/units.s

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -6,6 +6,7 @@
     config.make_config_py()  # installs __config__.py
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("cosmological_observation")
+    config.add_subpackage("halo_analysis")
     config.add_subpackage("halo_finding")
     config.add_subpackage("halo_mass_function")
     config.add_subpackage("level_sets")

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -129,6 +129,15 @@
         self._index = self.ds.index
         return self._index
 
+    def _debug(self):
+        """
+        When called from within a derived field, this will run pdb.  However,
+        during field detection, it will not.  This allows you to more easily
+        debug fields that are being called on actual objects.
+        """
+        import pdb
+        pdb.set_trace()
+
     def _set_default_field_parameters(self):
         self.field_parameters = {}
         for k,v in self._default_field_parameters.items():
@@ -438,8 +447,14 @@
 
     @contextmanager
     def _field_parameter_state(self, field_parameters):
+        # What we're doing here is making a copy of the incoming field
+        # parameters, and then updating it with our own.  This means that we'll
+        # be using our own center, if set, rather than the supplied one.  But
+        # it also means that any additionally set values can override it.
         old_field_parameters = self.field_parameters
-        self.field_parameters = field_parameters
+        new_field_parameters = field_parameters.copy()
+        new_field_parameters.update(old_field_parameters)
+        self.field_parameters = new_field_parameters
         yield
         self.field_parameters = old_field_parameters
 

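The comment added to ``_field_parameter_state`` above describes a copy-and-update: the incoming parameters are copied first and then overwritten with the container's own, so values the object already carries (its center, for instance) take precedence while everything else passes through. A tiny dict-only illustration of that ordering (values here are made up):

.. code-block:: python

    # Illustrative only: shows why the update order lets the object's own
    # parameters win over the supplied ones.
    incoming = {"center": (0.0, 0.0, 0.0), "bulk_velocity": (100.0, 0.0, 0.0)}
    own      = {"center": (0.5, 0.5, 0.5)}      # e.g. set by ds.sphere / ds.disk

    merged = incoming.copy()
    merged.update(own)                          # own values override duplicates

    print(merged["center"])          # (0.5, 0.5, 0.5) -- the object's center
    print(merged["bulk_velocity"])   # (100.0, 0.0, 0.0) -- still passed through
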
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -517,15 +517,15 @@
     Parameters
     ----------
     center : array_like 
-        coordinate to which the normal, radius, and height all reference; in
-        the center of one of the bases of the cylinder
+        coordinate to which the normal, radius, and height all reference
     normal : array_like
         the normal vector defining the direction of lengthwise part of the 
         cylinder
     radius : float
         the radius of the cylinder
     height : float
-        the height of the lengthwise part of the cylinder
+        the distance from the midplane of the cylinder to the top and 
+        bottom planes
     fields : array of fields, optional
         any fields to be pre-loaded in the cylinder object
     ds: Dataset, optional
@@ -543,14 +543,15 @@
     >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))
     """
     _type_name = "disk"
-    _con_args = ('center', '_norm_vec', '_radius', '_height')
+    _con_args = ('center', '_norm_vec', 'radius', 'height')
     def __init__(self, center, normal, radius, height, fields=None,
                  ds=None, **kwargs):
         YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
         self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
-        self._height = fix_length(height, self.ds)
-        self._radius = fix_length(radius, self.ds)
+        self.set_field_parameter("center", self.center)
+        self.height = fix_length(height, self.ds)
+        self.radius = fix_length(radius, self.ds)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
 class YTRegionBase(YTSelectionContainer3D):
@@ -575,7 +576,7 @@
     _con_args = ('center', 'left_edge', 'right_edge')
     def __init__(self, center, left_edge, right_edge, fields = None,
                  ds = None, **kwargs):
-        YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
+        YTSelectionContainer3D.__init__(self, center, ds, **kwargs)
         if not isinstance(left_edge, YTArray):
             self.left_edge = self.ds.arr(left_edge, 'code_length')
         else:
@@ -615,7 +616,7 @@
     >>> import yt
     >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
-    >>> sphere = ds.sphere(c,1.*ds['kpc'])
+    >>> sphere = ds.sphere(c, (1., "kpc"))
     """
     _type_name = "sphere"
     _con_args = ('center', 'radius')
@@ -627,6 +628,7 @@
             raise YTSphereTooSmall(ds, radius.in_units("code_length"),
                                    self.index.get_smallest_dx().in_units("code_length"))
         self.set_field_parameter('radius',radius)
+        self.set_field_parameter("center", self.center)
         self.radius = radius
 
 class YTEllipsoidBase(YTSelectionContainer3D):

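With the docstring fix above, ``height`` for the disk object is measured from the midplane out to each cap, so the cylinder's full length is twice the value passed in. A quick usage sketch (the dataset name follows the docstring example and is illustrative):

.. code-block:: python

    import yt

    ds = yt.load("RedshiftOutput0005")      # illustrative dataset
    c = [0.5, 0.5, 0.5]
    # Radius of 1 kpc; the disk extends 10 kpc above and 10 kpc below the
    # midplane through c, i.e. 20 kpc end to end.
    disk = ds.disk(c, [0, 0, 1], (1, "kpc"), (10, "kpc"))
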
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -572,7 +572,7 @@
         return out
 
     # Now all the object related stuff
-    def all_data(self, find_max=False):
+    def all_data(self, find_max=False, **kwargs):
         """
         all_data is a wrapper to the Region object for creating a region
         which covers the entire simulation domain.
@@ -580,7 +580,7 @@
         if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
-            self.domain_left_edge, self.domain_right_edge)
+            self.domain_left_edge, self.domain_right_edge, **kwargs)
 
     def box(self, left_edge, right_edge, **kwargs):
         """

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -82,9 +82,9 @@
             raise NeedsParameter("omega_baryon")
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-        # mean density(z) ~ omega_matter * (1 + z)^3
+        # mean matter density(z) ~ omega_matter * (1 + z)^3
         return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "baryon_overdensity"),
                        function=_baryon_overdensity,
@@ -99,9 +99,9 @@
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
         # mean density(z) ~ omega_matter * (1 + z)^3
-        return data[ftype, "density"] / data.ds.omega_matter / \
+        return data[ftype, "matter_density"] / data.ds.omega_matter / \
           co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "matter_overdensity"),
                        function=_matter_overdensity,

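The ``cosmology_fields`` fix above replaces ``(1.0 + data.ds.hubble_constant)`` with ``(1.0 + data.ds.current_redshift)``, so both overdensity fields are now normalized by the mean density at the dataset's redshift rather than by a spurious function of the Hubble constant. In the notation of the comments in the diff, the quantities being computed are

.. math::

   \delta_b = \frac{\rho_b}{\Omega_b \, \rho_{c,0} \, (1+z)^3},
   \qquad
   \delta_m = \frac{\rho_m}{\Omega_m \, \rho_{c,0} \, (1+z)^3},

since the mean matter density scales as :math:`\Omega_m \, \rho_{c,0} \, (1+z)^3`.
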
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -148,6 +148,10 @@
             self[item] = self._read_data(item)
         return self[item]
 
+    def _debug(self):
+        # We allow this to pass through.
+        return
+
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -15,6 +15,9 @@
 
 import numpy as np
 
+from yt.utilities.lib.geometry_utils import \
+    obtain_rvec
+
 def get_radius(data, field_prefix):
     center = data.get_field_parameter("center").in_units("cm")
     DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units("cm")
@@ -43,3 +46,17 @@
     # Alias it, just for clarity.
     radius = radius2
     return radius
+
+def get_periodic_rvec(data):
+    coords = obtain_rvec(data)
+    if sum(data.ds.periodicity) == 0: return coords
+    le = data.ds.domain_left_edge.in_units("code_length").d
+    dw = data.ds.domain_width.in_units("code_length").d
+    for i in range(coords.shape[0]):
+        if not data.ds.periodicity[i]: continue
+        coords[i, ...] -= le[i]
+        coords[i, ...] = np.min([np.abs(np.mod(coords[i, ...],  dw[i])),
+                                 np.abs(np.mod(coords[i, ...], -dw[i]))],
+                                 axis=0)
+        coords[i, ...] += le[i]
+    return coords

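The new ``get_periodic_rvec`` helper above folds each component of the center-relative position back into the domain on periodic axes, keeping whichever periodic image has the smaller magnitude. A one-dimensional NumPy sketch of the same arithmetic (names and values are illustrative):

.. code-block:: python

    import numpy as np

    def wrap_periodic(offsets, left_edge, width):
        # For each offset, keep the smaller-magnitude of its two periodic
        # images, exactly as the per-axis loop in get_periodic_rvec does.
        x = offsets - left_edge
        wrapped = np.min([np.abs(np.mod(x, width)),
                          np.abs(np.mod(x, -width))], axis=0)
        return wrapped + left_edge

    dx = np.array([0.1, 0.9, -0.4])                       # offsets in code_length
    print(wrap_periodic(dx, left_edge=0.0, width=1.0))    # -> [0.1  0.1  0.4]
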
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -22,6 +22,7 @@
     ValidateSpatial
 
 from .field_functions import \
+     get_periodic_rvec, \
      get_radius
 
 from .field_plugin_registry import \
@@ -39,9 +40,6 @@
     get_sph_theta, get_sph_phi, \
     periodic_dist, euclidean_dist
 
-from yt.utilities.lib.geometry_utils import \
-    obtain_rvec
-
 @register_field_plugin
 def setup_geometric_fields(registry, ftype = "gas", slice_info = None):
 
@@ -92,12 +90,8 @@
 
     ### spherical coordinates: r (radius)
     def _spherical_r(field, data):
-        center = data.get_field_parameter("center")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_sph_r(coords).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
 
     registry.add_field(("index", "spherical_r"),
              function=_spherical_r,
@@ -106,27 +100,19 @@
 
     ### spherical coordinates: theta (angle with respect to normal)
     def _spherical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_theta(coords, normal)
 
     registry.add_field(("index", "spherical_theta"),
              function=_spherical_theta,
              validators=[ValidateParameter("center"),
-             ValidateParameter("normal")])
+                         ValidateParameter("normal")])
 
     ### spherical coordinates: phi (angle in the plane perpendicular to the normal)
     def _spherical_phi(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_phi(coords, normal)
 
     registry.add_field(("index", "spherical_phi"),
@@ -136,29 +122,21 @@
 
     ### cylindrical coordinates: R (radius in the cylinder's plane)
     def _cylindrical_r(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_r"),
              function=_cylindrical_r,
              validators=[ValidateParameter("center"),
-                        ValidateParameter("normal")],
+                         ValidateParameter("normal")],
              units="cm")
 
     ### cylindrical coordinates: z (height above the cylinder's plane)
     def _cylindrical_z(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_cyl_z(coords, normal).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_z"),
              function=_cylindrical_z,
@@ -168,12 +146,8 @@
 
     ### cylindrical coordinates: theta (angle in the cylinder's plane)
     def _cylindrical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_cyl_theta(coords, normal)
 
     registry.add_field(("index", "cylindrical_theta"),

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -116,7 +116,11 @@
                                 "bulk_%s" % basename)
         theta = data['index', 'spherical_theta']
         phi   = data['index', 'spherical_phi']
-        return get_sph_r_component(vectors, theta, phi, normal)
+        rv = get_sph_r_component(vectors, theta, phi, normal)
+        # Now, anywhere that radius is in fact zero, we want to zero out our
+        # return values.
+        rv[np.isnan(theta)] = 0.0
+        return rv
     def _radial_absolute(field, data):
         return np.abs(data[ftype, "radial_%s" % basename])
 

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -23,7 +23,9 @@
       MaestroDataset
 
 from .fields import \
-      BoxlibFieldInfo
+      BoxlibFieldInfo, \
+      MaestroFieldInfo, \
+      CastroFieldInfo
 
 from .io import \
       IOHandlerBoxlib

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -566,8 +566,10 @@
         # Skip timesteps per level
         header_file.readline()
         self._header_mesh_start = header_file.tell()
-        header_file.next()
-        next_line = header_file.next()
+        # Skip the cell size information per level - we'll get this later
+        for i in range(self._max_level+1): header_file.readline()
+        # Get the geometry
+        next_line = header_file.readline()
         if len(next_line.split()) == 1:
             coordinate_type = int(next_line)
         else:
@@ -780,7 +782,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("="))
+                p, v = (_.strip() for _ in line[4:].split("=",1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:

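The ``split("=", 1)`` change above matters when a runtime-parameter value itself contains an equals sign: only the first ``=`` should separate the key from the value. For example (the parameter line is hypothetical):

.. code-block:: python

    line = "    sim.output_name = run=001"
    p, v = (_.strip() for _ in line[4:].split("=", 1))
    # p == "sim.output_name", v == "run=001"
    # The old split("=") would yield three pieces and fail to unpack.
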
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -83,16 +83,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +100,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -286,7 +286,7 @@
             active_particles = True
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
-        elif version > 2.0:
+        elif version == 2.2:
             active_particles = True
             nap = {}
             for type in self.parameters.get("AppendActiveParticleType", []):

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -219,6 +219,7 @@
             self.add_output_field(
                 ("enzo", te_name),
                 units = "code_velocity**2")
+            self.alias(("gas", "total_energy"), ("enzo", te_name))
             def _tot_minus_kin(field, data):
                 return data[te_name] - 0.5*(
                     data["x-velocity"]**2.0
@@ -226,6 +227,7 @@
                     + data["z-velocity"]**2.0 )
             self.add_field(
                 ("gas", "thermal_energy"),
+                function = _tot_minus_kin,
                 units = "erg/g")
 
     def setup_particle_fields(self, ptype):

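The Enzo field fix above does two things: it aliases ``("gas", "total_energy")`` to the code's total-energy field, and it supplies the ``function`` argument that was previously missing from the ``thermal_energy`` definition. With ``_tot_minus_kin`` attached, the specific thermal energy is the total minus the kinetic term,

.. math::

   e_{\mathrm{th}} = e_{\mathrm{tot}} - \tfrac{1}{2}\left(v_x^2 + v_y^2 + v_z^2\right),

with both sides in specific-energy units as in the surrounding definitions.
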
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -44,7 +44,10 @@
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif len(v.dims) == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         f.close()
@@ -238,12 +241,14 @@
         fields = []
         add_io = "io" in grid.ds.particle_types
         for name, v in self.grids_in_memory[grid.id].items():
-
             # NOTE: This won't work with 1D datasets or references.
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif v.ndim == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         return fields

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -344,21 +344,26 @@
                         for ptype in self._ptypes):
                 continue
             pos += 4
+            any_ptypes = False
             for ptype in self._ptypes:
                 if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
+                any_ptypes = True
                 if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
             pos += 4
+            if not any_ptypes: pos -= 8
         if file_size is not None:
             if file_size != pos:
                 mylog.warning("Your Gadget-2 file may have extra " +
-                              "columns or different precision!")
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
         return offsets
 
     def _identify_fields(self, domain):

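In the Gadget-2 offset walk above, each data block is bracketed by the two ``pos += 4`` increments (the 4-byte markers that surround each block in the binary format). The new ``any_ptypes`` flag backs those 8 bytes out again when no known particle type contributes a given block, and the size-mismatch warning now reports both the file size and the computed size. A toy version of the same bookkeeping (sizes and field names are made up):

.. code-block:: python

    # Illustrative offset accounting: advance past markers and data only for
    # blocks that some known particle type actually provides.
    pcount = {"Gas": 100, "Halo": 50}
    fs = 4                                     # bytes per value in this sketch
    field_list = [("Gas", "Density")]          # "Metals" is deliberately absent

    pos, offsets = 0, {}
    for field in ("Density", "Metals"):
        pos += 4                               # leading 4-byte marker
        any_ptypes = False
        for ptype in ("Gas", "Halo"):
            if (ptype, field) not in field_list:
                continue
            offsets[(ptype, field)] = pos
            any_ptypes = True
            pos += pcount[ptype] * fs
        pos += 4                               # trailing 4-byte marker
        if not any_ptypes:
            pos -= 8                           # block absent: undo both markers

    print(offsets, pos)                        # {('Gas', 'Density'): 4} 408
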
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r f01557326ebc45aa96165b7c1c5ae83a8f191b3d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -442,7 +442,7 @@
                         if child_mask[i, j, k] == 1 or this_level == 1:
                             mask[i, j, k] = self.select_cell(pos, dds)
                             total += mask[i, j, k]
-                        pos[2] += dds[1]
+                        pos[2] += dds[2]
                     pos[1] += dds[1]
                 pos[0] += dds[0]
         if total == 0: return None
@@ -785,9 +785,9 @@
         for i in range(3):
             self.norm_vec[i] = dobj._norm_vec[i]
             self.center[i] = _ensure_code(dobj.center[i])
-        self.radius = _ensure_code(dobj._radius)
+        self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
-        self.height = _ensure_code(dobj._height)
+        self.height = _ensure_code(dobj.height)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -799,12 +799,17 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int select_point(self, np.float64_t pos[3]) nogil:
-        cdef np.float64_t h, d, r2, temp
-        cdef int i
+        cdef np.float64_t h, d, r2, temp, spos
+        cdef int i, j, k
         h = d = 0
-        for i in range(3):
-            temp = self.difference(pos[i], self.center[i], i)
-            h += temp * self.norm_vec[i]
+        for ax in range(3):
+            temp = 1e30
+            for i in range(3):
+                if self.periodicity[ax] == 0 and i != 1: continue
+                spos = pos[ax] + (i-1)*self.domain_width[ax]
+                if fabs(spos - self.center[ax]) < fabs(temp):
+                    temp = spos - self.center[ax]
+            h += temp * self.norm_vec[ax]
             d += temp*temp
         r2 = (d - h*h)
         if fabs(h) <= self.height and r2 <= self.radius2: return 1
@@ -831,6 +836,8 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
+        # Until we can get our OBB/OBB intersection correct, disable this.
+        return 1
         cdef np.float64_t *arr[2]
         cdef np.float64_t pos[3], H, D, R2, temp
         cdef int i, j, k, n

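The rewritten ``select_point`` in the disk selector above now considers, on each periodic axis, the three candidate images ``pos + (i - 1) * domain_width`` and keeps the one nearest the cylinder center before projecting onto the normal (the same hunk also fixes the z increment in the mask-filling loop, which previously advanced by ``dds[1]``). A plain-Python rendering of the new point test, with illustrative values showing a point selected across a periodic boundary:

.. code-block:: python

    import numpy as np

    def select_point(pos, center, norm_vec, height, radius,
                     domain_width, periodicity):
        """Sketch of the periodic point-in-cylinder test from the diff above."""
        h = d = 0.0
        for ax in range(3):
            temp = 1e30
            for i in range(3):
                # Non-periodic axes only use the untranslated image (i == 1).
                if not periodicity[ax] and i != 1:
                    continue
                spos = pos[ax] + (i - 1) * domain_width[ax]
                if abs(spos - center[ax]) < abs(temp):
                    temp = spos - center[ax]
            h += temp * norm_vec[ax]     # signed distance along the cylinder axis
            d += temp * temp             # squared distance to the center
        r2 = d - h * h                   # squared distance from the axis
        return abs(h) <= height and r2 <= radius ** 2

    # A point just across the periodic x boundary is still inside the disk:
    print(select_point(pos=np.array([0.98, 0.5, 0.5]),
                       center=np.array([0.02, 0.5, 0.5]),
                       norm_vec=np.array([0.0, 0.0, 1.0]),
                       height=0.1, radius=0.1,
                       domain_width=np.array([1.0, 1.0, 1.0]),
                       periodicity=(True, True, True)))   # -> True
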
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/87b01dcddfee/
Changeset:   87b01dcddfee
Branch:      yt
User:        drudd
Date:        2014-08-27 21:23:05
Summary:     Check whether the dataset is cosmological and generate errors if functions are used out of context
Affected #:  2 files

diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r 87b01dcddfee219d22c13ee5c5c1cd9d81b9d935 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -167,6 +167,7 @@
 
     # cosmology parameter for time unit conversion
     cdef CosmologyParameters *cosmology
+    cdef float tcode_to_years
 
     # common attributes
     cdef public int num_grid
@@ -209,13 +210,17 @@
         self.sfc_max = self.num_root_cells-1
 
         # initialize cosmology module
-        self.cosmology = cosmology_allocate()
-        cosmology_set_OmegaM(self.cosmology, self.parameters['OmegaM'][0])
-        cosmology_set_OmegaL(self.cosmology, self.parameters['OmegaL'][0])
-        cosmology_set_OmegaB(self.cosmology, self.parameters['OmegaB'][0])
-        cosmology_set_h(self.cosmology, self.parameters['hubble'][0])
-        cosmology_set_DeltaDC(self.cosmology, self.parameters['DeltaDC'][0])
-        cosmology_set_fixed(self.cosmology)
+        if self.parameters.has_key("abox"):
+            self.cosmology = cosmology_allocate()
+            cosmology_set_OmegaM(self.cosmology, self.parameters['OmegaM'][0])
+            cosmology_set_OmegaL(self.cosmology, self.parameters['OmegaL'][0])
+            cosmology_set_OmegaB(self.cosmology, self.parameters['OmegaB'][0])
+            cosmology_set_h(self.cosmology, self.parameters['hubble'][0])
+            cosmology_set_DeltaDC(self.cosmology, self.parameters['DeltaDC'][0])
+            cosmology_set_fixed(self.cosmology)
+        else:
+            self.cosmology = NULL
+            self.tcode_to_years = self.parameters['time_unit'][0]/(365.25*86400)
 
         # grid detection
         self.min_level = 0
@@ -330,25 +335,43 @@
             self.parameters[key] = parameter
 
     def abox_from_auni(self, np.float64_t a):
-        return abox_from_auni(self.cosmology, a)
+        if self.cosmology:
+            return abox_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("abox_from_auni called for non-cosmological ARTIO fileset!")
 
     def tcode_from_auni(self, np.float64_t a):
-        return tcode_from_auni(self.cosmology, a)
+        if self.cosmology:
+            return tcode_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tcode_from_auni called for non-cosmological ARTIO fileset!")
 
     def tphys_from_auni(self, np.float64_t a):
-        return tphys_from_auni(self.cosmology, a)
+        if self.cosmology:
+            return tphys_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tphys_from_auni called for non-cosmological ARTIO fileset!")
 
     def auni_from_abox(self, np.float64_t v):
-        return auni_from_abox(self.cosmology, v)
+        if self.cosmology:
+            return auni_from_abox(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_abox called for non-cosmological ARTIO fileset!")
 
     def auni_from_tcode(self, np.float64_t v):
-        return auni_from_tcode(self.cosmology, v)
+        if self.cosmology:
+            return auni_from_tcode(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tcode called for non-cosmological ARTIO fileset!")
 
     @cython.wraparound(False)
     @cython.boundscheck(False)
     def auni_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
         cdef int i, nauni
         cdef np.ndarray[np.float64_t, ndim=1] auni
+        cdef CosmologyParameters *cosmology = self.cosmology
+        if not cosmology:
+            raise RuntimeError("auni_from_tcode_array called for non-cosmological ARTIO fileset!")
         auni = np.empty_like(tcode)
         nauni = auni.shape[0]
         with nogil:
@@ -357,31 +380,53 @@
         return auni
 
     def auni_from_tphys(self, np.float64_t v):
-        return auni_from_tphys(self.cosmology, v)
+        if self.cosmology:
+            return auni_from_tphys(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tphys called for non-cosmological ARTIO fileset!")
 
     def abox_from_tcode(self, np.float64_t abox):
-        return abox_from_tcode(self.cosmology, abox)
+        if self.cosmology:
+            return abox_from_tcode(self.cosmology, abox)
+        else:
+            raise RuntimeError("abox_from_tcode called for non-cosmological ARTIO fileset!")
 
     def tcode_from_abox(self, np.float64_t abox):
-        return tcode_from_abox(self.cosmology, abox)
+        if self.cosmology:
+            return tcode_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tcode_from_abox called for non-cosmological ARTIO fileset!")
 
     def tphys_from_abox(self, np.float64_t abox):
-        return tphys_from_abox(self.cosmology, abox)
+        if self.cosmology:
+            return tphys_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tphys_from_abox called for non-cosmological ARTIO fileset!")
 
     def tphys_from_tcode(self, np.float64_t tcode):
-        return tphys_from_tcode(self.cosmology, tcode)
+        if self.cosmology:
+            return tphys_from_tcode(self.cosmology, tcode)
+        else:
+            return self.tcode_to_years*tcode
 
     @cython.wraparound(False)
     @cython.boundscheck(False)
     def tphys_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
         cdef int i, ntphys
         cdef np.ndarray[np.float64_t, ndim=1] tphys
+        cdef CosmologyParameters *cosmology = self.cosmology
         tphys = np.empty_like(tcode)
         ntphys = tcode.shape[0]
-        with nogil:
-            for i in range(ntphys):
-                tphys[i] = tphys_from_tcode(self.cosmology, tcode[i])
-        return tphys
+
+        if cosmology:
+            tphys = np.empty_like(tcode)
+            ntphys = tcode.shape[0]
+            with nogil:
+                for i in range(ntphys):
+                    tphys[i] = tphys_from_tcode(cosmology, tcode[i])
+            return tphys
+        else:
+            return tcode*self.tcode_to_years
 
 #    @cython.boundscheck(False)
     @cython.wraparound(False)

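For non-cosmological ARTIO filesets the diff above stores a plain ``tcode_to_years`` factor instead of a cosmology object: ``tphys_from_tcode`` then reduces to a multiplication,

.. math::

   t_{\mathrm{phys}}\,[\mathrm{yr}] = t_{\mathrm{code}} \times
   \frac{\mathtt{time\_unit}}{365.25 \times 86400},

assuming ``time_unit`` is the code-time length in seconds (as the seconds-per-year divisor suggests), while the purely cosmological conversions such as ``abox_from_auni`` now raise ``RuntimeError`` rather than dereferencing a missing cosmology.
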
diff -r fdfdb5476aadcfaceaa6f51e35ff143606124191 -r 87b01dcddfee219d22c13ee5c5c1cd9d81b9d935 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -116,17 +116,14 @@
     def setup_particle_fields(self, ptype):
         def _creation_time(field,data):
             if isinstance(data,FieldDetector):
+                print "_creation_time is being probed", data
                 return data["BIRTH_TIME"]
-            if any(data["BIRTH_TIME"] > 0.0):
-                raise ValueError("Invalid value for BIRTH_TIME found in ARTIO frontend")
             return YTArray(data.ds._handle.tphys_from_tcode_array(data["BIRTH_TIME"]),"yr")
         
 
         def _creation_redshift(field,data):
             if isinstance(data,FieldDetector):
                 return data["BIRTH_TIME"]
-            if any(data["BIRTH_TIME"] > 0.0):
-                raise ValueError("Invalid value for BIRTH_TIME found in ARTIO frontend")
             return 1.0/data.ds._handle.auni_from_tcode_array(data["BIRTH_TIME"]) - 1.0
 
         def _age(field, data):
@@ -136,9 +133,11 @@
 
         self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
                     particle_type=True)
-        self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
-                    particle_type=True)
         self.add_field((ptype, "age"), function=_age, units="yr",
                     particle_type=True)
 
+        if self.ds.cosmological_simulation:
+            self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
+                    particle_type=True)
+
         super(ARTIOFieldInfo, self).setup_particle_fields(ptype)


https://bitbucket.org/yt_analysis/yt/commits/ddbd45d3c9fe/
Changeset:   ddbd45d3c9fe
Branch:      yt
User:        drudd
Date:        2014-08-28 00:03:59
Summary:     Cleaned up ARTIO derived field code
Affected #:  1 file

diff -r 87b01dcddfee219d22c13ee5c5c1cd9d81b9d935 -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -114,30 +114,23 @@
                        take_log=True)
 
     def setup_particle_fields(self, ptype):
-        def _creation_time(field,data):
-            if isinstance(data,FieldDetector):
-                print "_creation_time is being probed", data
-                return data["BIRTH_TIME"]
-            return YTArray(data.ds._handle.tphys_from_tcode_array(data["BIRTH_TIME"]),"yr")
-        
+        if ptype == "STAR":
+            def _creation_time(field,data):
+                return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
-        def _creation_redshift(field,data):
-            if isinstance(data,FieldDetector):
-                return data["BIRTH_TIME"]
-            return 1.0/data.ds._handle.auni_from_tcode_array(data["BIRTH_TIME"]) - 1.0
+            def _age(field, data):
+                return data.ds.current_time - data["STAR","creation_time"]
 
-        def _age(field, data):
-            if isinstance(data,FieldDetector):
-                return data["BIRTH_TIME"]
-            return data.ds.current_time - data["creation_time"]
+            self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
+                        particle_type=True)
+            self.add_field((ptype, "age"), function=_age, units="yr",
+                        particle_type=True)
 
-        self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
-                    particle_type=True)
-        self.add_field((ptype, "age"), function=_age, units="yr",
-                    particle_type=True)
+            if self.ds.cosmological_simulation:
+                def _creation_redshift(field,data):
+                    return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0
 
-        if self.ds.cosmological_simulation:
-            self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
-                    particle_type=True)
+                self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
+                        particle_type=True)
 
         super(ARTIOFieldInfo, self).setup_particle_fields(ptype)

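A short, hedged usage sketch of the reorganized fields: once an ARTIO dataset with STAR particles is loaded, the derived fields added above can be queried directly; the file name here is illustrative, not from the diff:

    import yt

    ds = yt.load("my_artio_output.art")            # hypothetical ARTIO fileset
    ad = ds.all_data()
    ages = ad["STAR", "age"]                       # years, via ("STAR", "creation_time")
    if ds.cosmological_simulation:
        zform = ad["STAR", "creation_redshift"]    # only registered for cosmological runs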

https://bitbucket.org/yt_analysis/yt/commits/733a49c70bdf/
Changeset:   733a49c70bdf
Branch:      yt
User:        drudd
Date:        2014-08-28 00:04:33
Summary:     Merged with upstream
Affected #:  37 files

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -580,56 +580,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +651,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -932,7 +929,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

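The notebook text refers to a call to `add_particle_filter` that falls outside this hunk; for completeness, a hedged sketch of how such a registration typically looks in yt 3.x (the dataset name is illustrative, not taken from the diff):

    import yt

    yt.add_particle_filter("young_stars", function=young_stars,
                           filtered_type="Stars", requires=["creation_time"])

    ds = yt.load("TipsyGalaxy/galaxy.00300")   # illustrative Tipsy dataset
    ds.add_particle_filter("young_stars")
    ad = ds.all_data()
    print(ad["young_stars", "particle_mass"])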
diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -179,6 +179,38 @@
      fields or that get aliased to themselves, we can specify a different
      desired output unit than the unit found on disk.
 
+Debugging a Derived Field
+-------------------------
+
+If your derived field is not behaving as you would like, you can insert a call
+to ``data._debug()`` to spawn an interactive interpreter whenever that line is
+reached.  Note that this is slightly different from calling
+``pdb.set_trace()``, as it will *only* trigger when the derived field is being
+called on an actual data object, rather than during the field detection phase.
+The starting position will be one function lower in the stack than you are
+likely interested in, but you can either step through back to the derived field
+function, or simply type ``u`` to go up a level in the stack.
+
+For instance, if you had defined this derived field:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And you wanted to debug it, you could do:
+
+.. code-block:: python
+
+   @yt.derived_field(name = "funthings")
+   def funthings(field, data):
+       data._debug()
+       return data["sillythings"] + data["humorousthings"]**2.0
+
+And now, when that derived field is actually used, you will be placed into a
+debugger.
+
 Units for Cosmological Datasets
 -------------------------------
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -170,10 +170,16 @@
 Developing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-If you plan to develop yt on Windows, we recommend using the `MinGW
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the
-syntax for the setup command is slightly different; you must type:
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
 
 .. code-block:: bash
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -138,14 +138,14 @@
 
 .. _loading-orion-data:
 
-Boxlib Data
+BoxLib Data
 -----------
 
-yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
 Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
-To load a Boxlib dataset, you can use the ``yt.load`` command on
+To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
 ``inputs`` file in the base directory, but Maestro and Castro will get
 all the necessary parameter information from the ``job_info`` file in
@@ -178,6 +178,18 @@
 For Maestro and Castro, you would not need the ``inputs`` file, and you 
 would have a ``job_info`` file in the plotfile directory.
 
+.. rubric:: Caveats
+
+* yt does not read the Maestro base state (although you can have Maestro
+  map it to a full Cartesian state variable before writing the plotfile
+  to get around this).  E-mail the dev list if you need this support.
+* yt does not know about particles in Maestro.
+* For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
+  depending on the value of the ``use_tfromp`` runtime parameter.
+* For Maestro, some velocity fields like ``velocity_magnitude`` or 
+  ``mach_number`` will always use the on-disk value, and not have yt 
+  derive it, due to the complex interplay of the base state velocity.
+
 .. _loading-enzo-data:
 
 Enzo Data

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -198,10 +198,9 @@
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on Microsoft Windows is only supported for Windows XP Service Pack
-3 and higher (both 32-bit and 64-bit) using Anaconda, see
-:ref:`anaconda-installation`.  Also see :ref:`windows-developing` for details on
-how to build yt from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
+:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
+from source in Windows.
 
 .. _source-installation:
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -853,9 +853,9 @@
               raise NeedsParameter("omega_baryon")
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-          # mean density(z) ~ omega_matter * (1 + z)^3
+          # mean matter density(z) ~ omega_matter * (1 + z)^3
           return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'cell_mass')
@@ -1526,9 +1526,9 @@
           co = data.ds.cosmology
           # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
           # mean density(z) ~ omega_matter * (1 + z)^3
-          return data[ftype, "density"] / data.ds.omega_matter / \
+          return data[ftype, "matter_density"] / data.ds.omega_matter / \
             co.critical_density(0.0) / \
-            (1.0 + data.ds.hubble_constant)**3
+            (1.0 + data.ds.current_redshift)**3
   
 
 ('gas', 'mean_molecular_weight')
@@ -1747,7 +1747,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radial_velocity_absolute')
@@ -1766,7 +1770,11 @@
                                   "bulk_%s" % basename)
           theta = data['index', 'spherical_theta']
           phi   = data['index', 'spherical_phi']
-          return get_sph_r_component(vectors, theta, phi, normal)
+          rv = get_sph_r_component(vectors, theta, phi, normal)
+          # Now, anywhere that radius is in fact zero, we want to zero out our
+          # return values.
+          rv[np.isnan(theta)] = 0.0
+          return rv
   
 
 ('gas', 'radiation_acceleration_x')
@@ -2702,12 +2710,8 @@
 .. code-block:: python
 
       def _cylindrical_r(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
   
 
@@ -2721,12 +2725,8 @@
 .. code-block:: python
 
       def _cylindrical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_cyl_theta(coords, normal)
   
 
@@ -2741,13 +2741,9 @@
 .. code-block:: python
 
       def _cylindrical_z(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_cyl_z(coords, normal).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
   
 
 ('index', 'disk_angle')
@@ -2903,12 +2899,8 @@
 .. code-block:: python
 
       def _spherical_phi(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_phi(coords, normal)
   
 
@@ -2923,12 +2915,8 @@
 .. code-block:: python
 
       def _spherical_r(field, data):
-          center = data.get_field_parameter("center")
-          coords = data.ds.arr(obtain_rvec(data), "code_length")
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
-          return get_sph_r(coords).in_cgs()
+          coords = get_periodic_rvec(data)
+          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
   
 
 ('index', 'spherical_theta')
@@ -2941,12 +2929,8 @@
 .. code-block:: python
 
       def _spherical_theta(field, data):
-          center = data.get_field_parameter("center")
           normal = data.get_field_parameter("normal")
-          coords = obtain_rvec(data)
-          coords[0,...] -= center[0]
-          coords[1,...] -= center[1]
-          coords[2,...] -= center[2]
+          coords = get_periodic_rvec(data)
           return get_sph_theta(coords, normal)
   
 
@@ -4025,6 +4009,603 @@
    * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
    * Particle Type: True
 
+.. _Castro_specific_fields:
+
+Castro-Specific Fields
+----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{2} \cdot \rm{s}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'rho_E')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'rho_e')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'grav_x')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_y')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'grav_z')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'pressure')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'kineng')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'magvort')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'divu')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'eint_E')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'eint_e')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'magmom')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'maggrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{cm}}{\rm{s}^{2}}}`
+   * Particle Type: False
+
+('boxlib', 'phiGrav')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+.. _Maestro_specific_fields:
+
+Maestro-Specific Fields
+-----------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'x_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_vel')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'magvel')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'radial_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'tfromp')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'tfromh')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'Machnumber')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``mach_number``
+   * Particle Type: False
+
+('boxlib', 'S')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'ad_excess')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltaT')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltagamma')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'deltap')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'divw0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'entropy')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{K} \cdot \rm{g}}}`
+   * Aliased to: ``entropy``
+   * Particle Type: False
+
+('boxlib', 'entropypert')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'enucdot')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{g} \cdot \rm{s}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_x')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_y')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'gpi_z')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{dyne}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'h')
+^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'h0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{erg} / \rm{g}}`
+   * Particle Type: False
+
+('boxlib', 'momentum')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} \cdot \rm{g} / \rm{s}}`
+   * Aliased to: ``momentum_magnitude``
+   * Particle Type: False
+
+('boxlib', 'p0')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'p0pluspi')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pi')
+^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'pioverp0')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'rho0')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhoh')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Aliased to: ``enthalpy_density``
+   * Particle Type: False
+
+('boxlib', 'rhoh0')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhohpert')
+^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{erg}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'rhopert')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{g}}{\rm{cm}^{3}}}`
+   * Particle Type: False
+
+('boxlib', 'soundspeed')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``sound_speed``
+   * Particle Type: False
+
+('boxlib', 'sponge')
+^^^^^^^^^^^^^^^^^^^^
+
+   * Particle Type: False
+
+('boxlib', 'tpert')
+^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Particle Type: False
+
+('boxlib', 'vort')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{1 / \rm{s}}`
+   * Aliased to: ``vorticity_magnitude``
+   * Particle Type: False
+
+('boxlib', 'w0_x')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_y')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+('boxlib', 'w0_z')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Particle Type: False
+
+.. _Orion_specific_fields:
+
+Orion-Specific Fields
+---------------------
+
+('boxlib', 'density')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{3}}}`
+   * Aliased to: ``density``
+   * Particle Type: False
+
+('boxlib', 'eden')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length} \cdot \rm{code}~\rm{time}^{2}}}`
+   * Aliased to: ``energy_density``
+   * Particle Type: False
+
+('boxlib', 'xmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_x``
+   * Particle Type: False
+
+('boxlib', 'ymom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_y``
+   * Particle Type: False
+
+('boxlib', 'zmom')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Aliased to: ``momentum_z``
+   * Particle Type: False
+
+('boxlib', 'temperature')
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'Temp')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{K}}`
+   * Aliased to: ``temperature``
+   * Particle Type: False
+
+('boxlib', 'x_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'y_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'z_velocity')
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('boxlib', 'xvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_x``
+   * Particle Type: False
+
+('boxlib', 'yvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_y``
+   * Particle Type: False
+
+('boxlib', 'zvel')
+^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{cm} / \rm{s}}`
+   * Aliased to: ``velocity_z``
+   * Particle Type: False
+
+('io', 'particle_mass')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass}}`
+   * Particle Type: True
+
+('io', 'particle_position_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_position_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_momentum_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\frac{\rm{code}~\rm{mass}}{\rm{code}~\rm{length}^{2} \cdot \rm{code}~\rm{time}}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_x')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_y')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_angmomen_z')
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{length}^{2} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
+('io', 'particle_id')
+^^^^^^^^^^^^^^^^^^^^^
+
+   * Aliased to: ``particle_index``
+   * Particle Type: True
+
+('io', 'particle_mdot')
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   * Units: :math:`\mathrm{\rm{code}~\rm{mass} / \rm{code}~\rm{time}}`
+   * Particle Type: True
+
 .. _Enzo_specific_fields:
 
 Enzo-Specific Fields

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:f3e6416e4807e008a016ad8c7dfc8e78cab0d7519498458660554a4c88549c23"
+  "signature": "sha256:5a1547973517987ff047f1b2405277a0e98392e8fd5ffe04521cb2dc372d32d3"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -83,7 +83,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# Build a transfer function that is a multivariate gaussian in Density\n",
+      "# Build a transfer function that is a multivariate gaussian in temperature\n",
       "tfh = yt.TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_log(True)\n",
@@ -180,4 +180,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,8 +1,10 @@
-from scipy import optimize
-import numpy as na
+import numpy as np
 import h5py
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
+from yt.utilities.on_demand_imports import _scipy
+
+optimize = _scipy.optimize
 
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
         minError=1E-4, complexLim=.995,
@@ -83,7 +85,7 @@
     x0,xRes=x[0],x[1]-x[0]
 
     #Empty fit without any lines
-    yFit = na.ones(len(fluxData))
+    yFit = np.ones(len(fluxData))
 
     #Force the first and last flux pixel to be 1 to prevent OOB
     fluxData[0]=1
@@ -98,10 +100,10 @@
     #Fit all species one at a time in given order from low to high wavelength
     for species in orderFits:
         speciesDict = speciesDicts[species]
-        speciesLines = {'N':na.array([]),
-                        'b':na.array([]),
-                        'z':na.array([]),
-                        'group#':na.array([])}
+        speciesLines = {'N':np.array([]),
+                        'b':np.array([]),
+                        'z':np.array([]),
+                        'group#':np.array([])}
 
         #Set up wavelengths for species
         initWl = speciesDict['wavelength'][0]
@@ -131,7 +133,7 @@
                         yFitBounded,z,speciesDict,
                         minSize,minError)
 
-            if na.size(newLinesP)> 0:
+            if np.size(newLinesP)> 0:
 
                 #Check for EXPLOOOOSIIONNNSSS
                 newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
@@ -150,12 +152,12 @@
 
 
             #Add new group to all fitted lines
-            if na.size(newLinesP)>0:
-                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
-                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
-                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
-                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
-                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+            if np.size(newLinesP)>0:
+                speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*np.ones(np.size(newLinesP[:,0]))
+                speciesLines['group#']=np.append(speciesLines['group#'],groupNums)
 
         allSpeciesLines[species]=speciesLines
 
@@ -226,7 +228,7 @@
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
         initP[2]=initz
-        initP=na.array([initP])
+        initP=np.array([initP])
 
     linesP = initP
 
@@ -259,7 +261,7 @@
 
 
         #Set results of optimization
-        linesP = na.reshape(fitP,(-1,3))
+        linesP = np.reshape(fitP,(-1,3))
 
         #Generate difference between current best fit and data
         yNewFit=_gen_flux_lines(x,linesP,speciesDict)
@@ -288,7 +290,7 @@
                 break
 
         #If too many lines 
-        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+        if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x):
             #If its fitable by flag tools and still bad, use flag tools
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
@@ -315,17 +317,17 @@
             newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
-        linesP=na.append(linesP,[newP],axis=0)
+        linesP=np.append(linesP,[newP],axis=0)
 
 
     #Check the parameters of all lines to see if they fall in an
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict,x)
+        check=_check_params(np.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
-    linesP = na.delete(linesP,remove,axis=0)
+    linesP = np.delete(linesP,remove,axis=0)
 
     return linesP,flag
 
@@ -377,7 +379,7 @@
     #Iterate through test line guesses
     for initLines in lineTests:
         if initLines[1,0]==0:
-            initLines = na.delete(initLines,1,axis=0)
+            initLines = np.delete(initLines,1,axis=0)
 
         #Do fitting with initLines as first guess
         linesP,flag=_complex_fit(x,yDat,yFit,initz,
@@ -421,7 +423,7 @@
     """
 
     #Set up a bunch of empty lines
-    testP = na.zeros((10,2,3))
+    testP = np.zeros((10,2,3))
 
     testP[0,0,:]=[1E18,20,initz]
     testP[1,0,:]=[1E18,40,initz]
@@ -542,7 +544,7 @@
                 errBound = 10*errBound*len(yb)
 
             #Generate a fit and find the difference to data
-            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict)
             dif =yb-yFitb
 
 
@@ -557,7 +559,7 @@
                 break
 
     #Remove all bad line fits
-    linesP = na.delete(linesP,removeLines,axis=0)
+    linesP = np.delete(linesP,removeLines,axis=0)
 
     return linesP 
 
@@ -755,7 +757,7 @@
             if firstLine: 
                 break
 
-    flux = na.exp(-y)
+    flux = np.exp(-y)
     return flux
 
 def _gen_tau(t, p, f, Gamma, lambda_unshifted):
@@ -768,7 +770,7 @@
     a=7.95774715459E-15*Gamma*lambda_unshifted/b
     x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
     
-    H = na.zeros(len(x))
+    H = np.zeros(len(x))
     H = voigt(a,x)
     
     tau = tau_o*H
@@ -910,9 +912,9 @@
 
             # Make the final line parameters. Its annoying because
             # one or both regions may have fit to nothing
-            if na.size(p1)> 0 and na.size(p2)>0:
-                p = na.r_[p1,p2]
-            elif na.size(p1) > 0:
+            if np.size(p1)> 0 and np.size(p2)>0:
+                p = np.r_[p1,p2]
+            elif np.size(p1) > 0:
                 p = p1
             else:
                 p = p2
@@ -952,7 +954,7 @@
             # max and min to prevent boundary errors
 
             flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
-            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+            flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
 
             #Find regions that are absorbing outside the region we fit
             flux_dif = 1 - flux
@@ -971,7 +973,7 @@
                 remove_lines.append(i)
     
     if remove_lines:
-        p = na.delete(p, remove_lines, axis=0)
+        p = np.delete(p, remove_lines, axis=0)
 
     return p
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -22,14 +22,10 @@
 except ImportError:
     pass
 
-try:
-    import xspec
-    from scipy.integrate import cumtrapz
-    from scipy import stats        
-except ImportError:
-    pass
-from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.on_demand_imports import _astropy, _scipy
+
 pyfits = _astropy.pyfits
+stats = _scipy.stats
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
@@ -212,11 +208,14 @@
         try:
             self.line_handle = pyfits.open(self.linefile)
         except IOError:
-            mylog.error("LINE file %s does not exist" % (self.linefile))
+            mylog.error("LINE file %s does not exist" % self.linefile)
+            raise IOError("LINE file %s does not exist" % self.linefile)
         try:
             self.coco_handle = pyfits.open(self.cocofile)
         except IOError:
-            mylog.error("COCO file %s does not exist" % (self.cocofile))
+            mylog.error("COCO file %s does not exist" % self.cocofile)
+            raise IOError("COCO file %s does not exist" % self.cocofile)
+
         self.Tvals = self.line_handle[1].data.field("kT")
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
@@ -224,18 +223,18 @@
     
     def _make_spectrum(self, element, tindex):
         
-        tmpspec = np.zeros((self.nchan))
+        tmpspec = np.zeros(self.nchan)
         
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
-        vec = np.zeros((self.nchan))
+        vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.ndarray_view()
         if self.thermal_broad:
-            vec = np.zeros((self.nchan))
+            vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
@@ -270,10 +269,10 @@
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        cspec_l = np.zeros((self.nchan))
-        mspec_l = np.zeros((self.nchan))
-        cspec_r = np.zeros((self.nchan))
-        mspec_r = np.zeros((self.nchan))
+        cspec_l = np.zeros(self.nchan)
+        mspec_l = np.zeros(self.nchan)
+        cspec_r = np.zeros(self.nchan)
+        mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
             return cspec_l*cm3/units.s, mspec_l*cm3/units.s

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -6,6 +6,7 @@
     config.make_config_py()  # installs __config__.py
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("cosmological_observation")
+    config.add_subpackage("halo_analysis")
     config.add_subpackage("halo_finding")
     config.add_subpackage("halo_mass_function")
     config.add_subpackage("level_sets")

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -129,6 +129,15 @@
         self._index = self.ds.index
         return self._index
 
+    def _debug(self):
+        """
+        When called from within a derived field, this will run pdb.  However,
+        during field detection, it will not.  This allows you to more easily
+        debug fields that are being called on actual objects.
+        """
+        import pdb
+        pdb.set_trace()
+
     def _set_default_field_parameters(self):
         self.field_parameters = {}
         for k,v in self._default_field_parameters.items():
@@ -438,8 +447,14 @@
 
     @contextmanager
     def _field_parameter_state(self, field_parameters):
+        # What we're doing here is making a copy of the incoming field
+        # parameters, and then updating it with our own.  This means that we'll
+        # be using our own center, if set, rather than the supplied one.  But
+        # it also means that any additionally set values can override it.
         old_field_parameters = self.field_parameters
-        self.field_parameters = field_parameters
+        new_field_parameters = field_parameters.copy()
+        new_field_parameters.update(old_field_parameters)
+        self.field_parameters = new_field_parameters
         yield
         self.field_parameters = old_field_parameters
 

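The new `_field_parameter_state` merges the incoming field parameters into a copy so that values already set on the object (such as its own center) take precedence, while any extra incoming keys still pass through. A minimal illustration of that dict precedence, outside yt:

    incoming = {"center": "caller center", "bulk_velocity": [0, 0, 0]}
    existing = {"center": "object center"}

    merged = incoming.copy()
    merged.update(existing)
    # merged == {"center": "object center", "bulk_velocity": [0, 0, 0]}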
diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -517,15 +517,15 @@
     Parameters
     ----------
     center : array_like 
-        coordinate to which the normal, radius, and height all reference; in
-        the center of one of the bases of the cylinder
+        coordinate to which the normal, radius, and height all reference
     normal : array_like
         the normal vector defining the direction of lengthwise part of the 
         cylinder
     radius : float
         the radius of the cylinder
     height : float
-        the height of the lengthwise part of the cylinder
+        the distance from the midplane of the cylinder to the top and 
+        bottom planes
     fields : array of fields, optional
         any fields to be pre-loaded in the cylinder object
     ds: Dataset, optional
@@ -543,14 +543,15 @@
     >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))
     """
     _type_name = "disk"
-    _con_args = ('center', '_norm_vec', '_radius', '_height')
+    _con_args = ('center', '_norm_vec', 'radius', 'height')
     def __init__(self, center, normal, radius, height, fields=None,
                  ds=None, **kwargs):
         YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
         self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
-        self._height = fix_length(height, self.ds)
-        self._radius = fix_length(radius, self.ds)
+        self.set_field_parameter("center", self.center)
+        self.height = fix_length(height, self.ds)
+        self.radius = fix_length(radius, self.ds)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
 class YTRegionBase(YTSelectionContainer3D):
@@ -575,7 +576,7 @@
     _con_args = ('center', 'left_edge', 'right_edge')
     def __init__(self, center, left_edge, right_edge, fields = None,
                  ds = None, **kwargs):
-        YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
+        YTSelectionContainer3D.__init__(self, center, ds, **kwargs)
         if not isinstance(left_edge, YTArray):
             self.left_edge = self.ds.arr(left_edge, 'code_length')
         else:
@@ -615,7 +616,7 @@
     >>> import yt
     >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
-    >>> sphere = ds.sphere(c,1.*ds['kpc'])
+    >>> sphere = ds.sphere(c, (1., "kpc"))
     """
     _type_name = "sphere"
     _con_args = ('center', 'radius')
@@ -627,6 +628,7 @@
             raise YTSphereTooSmall(ds, radius.in_units("code_length"),
                                    self.index.get_smallest_dx().in_units("code_length"))
         self.set_field_parameter('radius',radius)
+        self.set_field_parameter("center", self.center)
         self.radius = radius
 
 class YTEllipsoidBase(YTSelectionContainer3D):

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -572,7 +572,7 @@
         return out
 
     # Now all the object related stuff
-    def all_data(self, find_max=False):
+    def all_data(self, find_max=False, **kwargs):
         """
         all_data is a wrapper to the Region object for creating a region
         which covers the entire simulation domain.
@@ -580,7 +580,7 @@
         if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
-            self.domain_left_edge, self.domain_right_edge)
+            self.domain_left_edge, self.domain_right_edge, **kwargs)
 
     def box(self, left_edge, right_edge, **kwargs):
         """

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -82,9 +82,9 @@
             raise NeedsParameter("omega_baryon")
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
-        # mean density(z) ~ omega_matter * (1 + z)^3
+        # mean matter density(z) ~ omega_matter * (1 + z)^3
         return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "baryon_overdensity"),
                        function=_baryon_overdensity,
@@ -99,9 +99,9 @@
         co = data.ds.cosmology
         # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
         # mean density(z) ~ omega_matter * (1 + z)^3
-        return data[ftype, "density"] / data.ds.omega_matter / \
+        return data[ftype, "matter_density"] / data.ds.omega_matter / \
           co.critical_density(0.0) / \
-          (1.0 + data.ds.hubble_constant)**3
+          (1.0 + data.ds.current_redshift)**3
 
     registry.add_field((ftype, "matter_overdensity"),
                        function=_matter_overdensity,
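
The corrected overdensity fields encode the standard relation that the mean matter (or baryon) density scales as (1 + z)^3 relative to today, which is why the exponent now uses current_redshift rather than hubble_constant. A standalone sketch of that arithmetic (plain Python, not yt API):

    def overdensity(rho, omega_component, rho_crit0, z):
        # rho_mean(z) = omega_component * rho_crit(z=0) * (1 + z)**3
        return rho / (omega_component * rho_crit0 * (1.0 + z)**3)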

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -148,6 +148,10 @@
             self[item] = self._read_data(item)
         return self[item]
 
+    def _debug(self):
+        # We allow this to pass through.
+        return
+
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -15,6 +15,9 @@
 
 import numpy as np
 
+from yt.utilities.lib.geometry_utils import \
+    obtain_rvec
+
 def get_radius(data, field_prefix):
     center = data.get_field_parameter("center").in_units("cm")
     DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units("cm")
@@ -43,3 +46,17 @@
     # Alias it, just for clarity.
     radius = radius2
     return radius
+
+def get_periodic_rvec(data):
+    coords = obtain_rvec(data)
+    if sum(data.ds.periodicity) == 0: return coords
+    le = data.ds.domain_left_edge.in_units("code_length").d
+    dw = data.ds.domain_width.in_units("code_length").d
+    for i in range(coords.shape[0]):
+        if not data.ds.periodicity[i]: continue
+        coords[i, ...] -= le[i]
+        coords[i, ...] = np.min([np.abs(np.mod(coords[i, ...],  dw[i])),
+                                 np.abs(np.mod(coords[i, ...], -dw[i]))],
+                                 axis=0)
+        coords[i, ...] += le[i]
+    return coords
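
get_periodic_rvec above applies a per-axis minimum-image reduction using a pair of np.mod calls. A standalone illustration of that reduction for a single periodic axis (plain NumPy, not part of the patch):

    import numpy as np

    def nearest_image_distance(x, dw):
        # Smallest of the two wrapped distances for displacements x on a
        # periodic axis of width dw, mirroring the np.mod pair used above.
        return np.min([np.abs(np.mod(x, dw)), np.abs(np.mod(x, -dw))], axis=0)

    print(nearest_image_distance(np.array([0.1, 0.9]), 1.0))   # -> [0.1, 0.1]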

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -22,6 +22,7 @@
     ValidateSpatial
 
 from .field_functions import \
+     get_periodic_rvec, \
      get_radius
 
 from .field_plugin_registry import \
@@ -39,9 +40,6 @@
     get_sph_theta, get_sph_phi, \
     periodic_dist, euclidean_dist
 
-from yt.utilities.lib.geometry_utils import \
-    obtain_rvec
-
 @register_field_plugin
 def setup_geometric_fields(registry, ftype = "gas", slice_info = None):
 
@@ -92,12 +90,8 @@
 
     ### spherical coordinates: r (radius)
     def _spherical_r(field, data):
-        center = data.get_field_parameter("center")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_sph_r(coords).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
 
     registry.add_field(("index", "spherical_r"),
              function=_spherical_r,
@@ -106,27 +100,19 @@
 
     ### spherical coordinates: theta (angle with respect to normal)
     def _spherical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_theta(coords, normal)
 
     registry.add_field(("index", "spherical_theta"),
              function=_spherical_theta,
              validators=[ValidateParameter("center"),
-             ValidateParameter("normal")])
+                         ValidateParameter("normal")])
 
     ### spherical coordinates: phi (angle in the plane perpendicular to the normal)
     def _spherical_phi(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_sph_phi(coords, normal)
 
     registry.add_field(("index", "spherical_phi"),
@@ -136,29 +122,21 @@
 
     ### cylindrical coordinates: R (radius in the cylinder's plane)
     def _cylindrical_r(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_r"),
              function=_cylindrical_r,
              validators=[ValidateParameter("center"),
-                        ValidateParameter("normal")],
+                         ValidateParameter("normal")],
              units="cm")
 
     ### cylindrical coordinates: z (height above the cylinder's plane)
     def _cylindrical_z(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = data.ds.arr(obtain_rvec(data), "code_length")
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
-        return get_cyl_z(coords, normal).in_cgs()
+        coords = get_periodic_rvec(data)
+        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_z"),
              function=_cylindrical_z,
@@ -168,12 +146,8 @@
 
     ### cylindrical coordinates: theta (angle in the cylinder's plane)
     def _cylindrical_theta(field, data):
-        center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = obtain_rvec(data)
-        coords[0,...] -= center[0]
-        coords[1,...] -= center[1]
-        coords[2,...] -= center[2]
+        coords = get_periodic_rvec(data)
         return get_cyl_theta(coords, normal)
 
     registry.add_field(("index", "cylindrical_theta"),

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -116,7 +116,11 @@
                                 "bulk_%s" % basename)
         theta = data['index', 'spherical_theta']
         phi   = data['index', 'spherical_phi']
-        return get_sph_r_component(vectors, theta, phi, normal)
+        rv = get_sph_r_component(vectors, theta, phi, normal)
+        # Now, anywhere that radius is in fact zero, we want to zero out our
+        # return values.
+        rv[np.isnan(theta)] = 0.0
+        return rv
     def _radial_absolute(field, data):
         return np.abs(data[ftype, "radial_%s" % basename])
 

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -23,7 +23,9 @@
       MaestroDataset
 
 from .fields import \
-      BoxlibFieldInfo
+      BoxlibFieldInfo, \
+      MaestroFieldInfo, \
+      CastroFieldInfo
 
 from .io import \
       IOHandlerBoxlib

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -566,8 +566,10 @@
         # Skip timesteps per level
         header_file.readline()
         self._header_mesh_start = header_file.tell()
-        header_file.next()
-        next_line = header_file.next()
+        # Skip the cell size information per level - we'll get this later
+        for i in range(self._max_level+1): header_file.readline()
+        # Get the geometry
+        next_line = header_file.readline()
         if len(next_line.split()) == 1:
             coordinate_type = int(next_line)
         else:
@@ -780,7 +782,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("="))
+                p, v = (_.strip() for _ in line[4:].split("=",1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:
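
The split("=", 1) change above matters whenever a runtime parameter value itself contains an equals sign. A tiny illustration with a hypothetical parameter line:

    line = "    amr.derive_plot_vars = x=1 y=2"      # hypothetical job_info line
    p, v = (s.strip() for s in line[4:].split("=", 1))
    # p == "amr.derive_plot_vars", v == "x=1 y=2"
    # a plain line[4:].split("=") would yield three pieces and fail to unpack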

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -83,16 +83,8 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             grid = chunks[0].objs[0]
-            lstring = 'level_%i' % grid.Level
-            lev = self._handle[lstring]
-            grid_offset = lev[self._offset_string][grid._level_id]
-            boxsize = grid.ActiveDimensions.prod()
             for ftype, fname in fields:
-                start = grid_offset+self.field_dict[fname]*boxsize
-                stop = start + boxsize
-                data = lev[self._data_string][start:stop]
-                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
-                                        order='F')
+                rv[ftype, fname] = self._read_data(grid, fname)
             return rv
         if size is None:
             size = sum((g.count(selector) for chunk in chunks
@@ -108,16 +100,10 @@
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
-                lstring = 'level_%i' % g.Level
-                lev = self._handle[lstring]
-                grid_offset = lev[self._offset_string][g._level_id]
-                boxsize = g.ActiveDimensions.prod()
                 nd = 0
                 for field in fields:
-                    start = grid_offset+self.field_dict[fname]*boxsize
-                    stop = start + boxsize
-                    data = lev[self._data_string][start:stop]
-                    data = data.reshape(g.ActiveDimensions, order='F')
+                    ftype, fname = field
+                    data = self._read_data(g, fname)
                     nd = g.select(selector, data, rv[field], ind) # caches
                 ind += nd
         return rv

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -286,7 +286,7 @@
             active_particles = True
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
-        elif version > 2.0:
+        elif version == 2.2:
             active_particles = True
             nap = {}
             for type in self.parameters.get("AppendActiveParticleType", []):

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -219,6 +219,7 @@
             self.add_output_field(
                 ("enzo", te_name),
                 units = "code_velocity**2")
+            self.alias(("gas", "total_energy"), ("enzo", te_name))
             def _tot_minus_kin(field, data):
                 return data[te_name] - 0.5*(
                     data["x-velocity"]**2.0
@@ -226,6 +227,7 @@
                     + data["z-velocity"]**2.0 )
             self.add_field(
                 ("gas", "thermal_energy"),
+                function = _tot_minus_kin,
                 units = "erg/g")
 
     def setup_particle_fields(self, ptype):

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -44,7 +44,10 @@
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif len(v.dims) == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         f.close()
@@ -238,12 +241,14 @@
         fields = []
         add_io = "io" in grid.ds.particle_types
         for name, v in self.grids_in_memory[grid.id].items():
-
             # NOTE: This won't work with 1D datasets or references.
             if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif v.ndim == 1:
-                if add_io: fields.append( ("io", str(name)) )
+                if grid.ds.dimensionality == 1:
+                    fields.append( ("enzo", str(name)) )
+                elif add_io:
+                    fields.append( ("io", str(name)) )
             else:
                 fields.append( ("enzo", str(name)) )
         return fields

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -344,21 +344,26 @@
                         for ptype in self._ptypes):
                 continue
             pos += 4
+            any_ptypes = False
             for ptype in self._ptypes:
                 if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
+                any_ptypes = True
                 if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
             pos += 4
+            if not any_ptypes: pos -= 8
         if file_size is not None:
             if file_size != pos:
                 mylog.warning("Your Gadget-2 file may have extra " +
-                              "columns or different precision!")
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
         return offsets
 
     def _identify_fields(self, domain):
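
For context on the pos -= 8 above: Gadget-2 blocks are bracketed by 4-byte record markers, so a field block that contains none of the requested particle types should contribute neither marker to the running offset. A minimal sketch of that bookkeeping (hypothetical values, not the yt reader itself):

    RECORD_MARKER = 4                  # bytes before and after each block
    pos = 0
    pos += RECORD_MARKER               # opening marker, added optimistically
    matched_particle_types = 0         # nothing in this block was selected
    pos += RECORD_MARKER               # closing marker
    if matched_particle_types == 0:
        pos -= 2 * RECORD_MARKER       # block is absent: undo both markers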

diff -r ddbd45d3c9fe7c0c64f17a694a61031da89254c0 -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -442,7 +442,7 @@
                         if child_mask[i, j, k] == 1 or this_level == 1:
                             mask[i, j, k] = self.select_cell(pos, dds)
                             total += mask[i, j, k]
-                        pos[2] += dds[1]
+                        pos[2] += dds[2]
                     pos[1] += dds[1]
                 pos[0] += dds[0]
         if total == 0: return None
@@ -785,9 +785,9 @@
         for i in range(3):
             self.norm_vec[i] = dobj._norm_vec[i]
             self.center[i] = _ensure_code(dobj.center[i])
-        self.radius = _ensure_code(dobj._radius)
+        self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
-        self.height = _ensure_code(dobj._height)
+        self.height = _ensure_code(dobj.height)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -799,12 +799,17 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int select_point(self, np.float64_t pos[3]) nogil:
-        cdef np.float64_t h, d, r2, temp
-        cdef int i
+        cdef np.float64_t h, d, r2, temp, spos
+        cdef int i, j, k
         h = d = 0
-        for i in range(3):
-            temp = self.difference(pos[i], self.center[i], i)
-            h += temp * self.norm_vec[i]
+        for ax in range(3):
+            temp = 1e30
+            for i in range(3):
+                if self.periodicity[ax] == 0 and i != 1: continue
+                spos = pos[ax] + (i-1)*self.domain_width[ax]
+                if fabs(spos - self.center[ax]) < fabs(temp):
+                    temp = spos - self.center[ax]
+            h += temp * self.norm_vec[ax]
             d += temp*temp
         r2 = (d - h*h)
         if fabs(h) <= self.height and r2 <= self.radius2: return 1
@@ -831,6 +836,8 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
+        # Until we can get our OBB/OBB intersection correct, disable this.
+        return 1
         cdef np.float64_t *arr[2]
         cdef np.float64_t pos[3], H, D, R2, temp
         cdef int i, j, k, n

This diff is so big that we needed to truncate the remainder.
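
In the selector change above, select_point now compares, per axis, the point against its periodic images at pos + {-1, 0, +1} * domain_width and keeps the offset closest to the disk center. A standalone sketch of that per-axis search (plain Python, hypothetical values):

    def nearest_offset(pos, center, width, periodic=True):
        # Try the point itself and, if the axis is periodic, its two images.
        shifts = (-1, 0, 1) if periodic else (0,)
        return min((pos + s * width - center for s in shifts), key=abs)

    print(nearest_offset(0.95, 0.05, 1.0))   # -> ~-0.1, the wrapped image beats +0.9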

https://bitbucket.org/yt_analysis/yt/commits/6430c8f9543d/
Changeset:   6430c8f9543d
Branch:      yt
User:        drudd
Date:        2014-08-29 19:04:22
Summary:     Restored checks in derived units for field detectors, which pass in invalid quantities
Affected #:  1 file

diff -r 733a49c70bdf982e02ad8fcf4e6fa714418c986d -r 6430c8f9543d319b25575a03285c4a7a835b59f9 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -116,9 +116,13 @@
     def setup_particle_fields(self, ptype):
         if ptype == "STAR":
             def _creation_time(field,data):
+                if isinstance(data, FieldDetector):
+                    return data["STAR","BIRTH_TIME"]
                 return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
             def _age(field, data):
+                if isinstance(data, FieldDetector):
+                    return data["STAR","BIRTH_TIME"]
                 return data.ds.current_time - data["STAR","creation_time"]
 
             self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
@@ -128,6 +132,8 @@
 
             if self.ds.cosmological_simulation:
                 def _creation_redshift(field,data):
+                    if isinstance(data, FieldDetector):
+                        return data["STAR","BIRTH_TIME"]
                     return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0
 
                 self.add_field((ptype, "creation_redshift"), function=_creation_redshift,


https://bitbucket.org/yt_analysis/yt/commits/90881a618967/
Changeset:   90881a618967
Branch:      yt
User:        drudd
Date:        2014-08-29 19:51:29
Summary:     Added comment about field detection hack and removed one unnecessary case
Affected #:  1 file

diff -r 6430c8f9543d319b25575a03285c4a7a835b59f9 -r 90881a618967debbb7883e7dc1da6cc2457f3820 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -116,13 +116,14 @@
     def setup_particle_fields(self, ptype):
         if ptype == "STAR":
             def _creation_time(field,data):
+                # this test is necessary to avoid passing invalid tcode values 
+                # to the function tphys_from_tcode during field detection
+                # (1.0 is not a valid tcode value)
                 if isinstance(data, FieldDetector):
                     return data["STAR","BIRTH_TIME"]
                 return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
             def _age(field, data):
-                if isinstance(data, FieldDetector):
-                    return data["STAR","BIRTH_TIME"]
                 return data.ds.current_time - data["STAR","creation_time"]
 
             self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
@@ -132,6 +133,9 @@
 
             if self.ds.cosmological_simulation:
                 def _creation_redshift(field,data):
+                    # this test is necessary to avoid passing invalid tcode values 
+                    # to the function auni_from_tcode during field detection
+                    # (1.0 is not a valid tcode value)
                     if isinstance(data, FieldDetector):
                         return data["STAR","BIRTH_TIME"]
                     return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0


https://bitbucket.org/yt_analysis/yt/commits/43f2d5aea978/
Changeset:   43f2d5aea978
Branch:      yt
User:        drudd
Date:        2014-09-02 23:40:39
Summary:     Turns out the hack is necessary for fields derived from derived fields too...
Affected #:  1 file

diff -r 90881a618967debbb7883e7dc1da6cc2457f3820 -r 43f2d5aea97828d4b39d157559ffcb4b7a9ce28b yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -124,6 +124,8 @@
                 return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
             def _age(field, data):
+                if isinstance(data, FieldDetector):
+                    return data["STAR","creation_time"]
                 return data.ds.current_time - data["STAR","creation_time"]
 
             self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
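
The three commits above converge on one pattern: any derived field whose function cannot tolerate the placeholder values supplied during field detection short-circuits when handed a FieldDetector. A condensed sketch of that guard (the function name here is illustrative; field keys as in the diffs):

    from yt.fields.field_detector import FieldDetector

    def _needs_real_tcode(field, data):
        # During field detection, data is a FieldDetector holding placeholder
        # values (e.g. 1.0) that are not valid tcode inputs; return an array of
        # the right shape and skip the real conversion.
        if isinstance(data, FieldDetector):
            return data["STAR", "BIRTH_TIME"]
        return data.ds._handle.tphys_from_tcode_array(data["STAR", "BIRTH_TIME"])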


https://bitbucket.org/yt_analysis/yt/commits/5130e0892eb0/
Changeset:   5130e0892eb0
Branch:      yt
User:        drudd
Date:        2014-09-03 00:21:55
Summary:     Merged in drudd/yt (pull request #1172)

Two approvers will have to do.  ARTIO cosmological conversion improvements
Affected #:  5 files

diff -r 66661ac7b4208e71cd9b3ce97f36c1bdc06f5deb -r 5130e0892eb0cda34d5bff8a94e424d737e1518d yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -25,6 +25,33 @@
     cdef extern from "alloca.h":
         void *alloca(int)
 
+cdef extern from "cosmology.h":
+    ctypedef struct CosmologyParameters "CosmologyParameters" :
+        pass
+    CosmologyParameters *cosmology_allocate()
+    void cosmology_free(CosmologyParameters *c)
+    void cosmology_set_fixed(CosmologyParameters *c)
+
+    void cosmology_set_OmegaM(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaB(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaL(CosmologyParameters *c, double value)
+    void cosmology_set_h(CosmologyParameters *c, double value)
+    void cosmology_set_DeltaDC(CosmologyParameters *c, double value)
+
+    double abox_from_auni(CosmologyParameters *c, double a) nogil
+    double tcode_from_auni(CosmologyParameters *c, double a) nogil
+    double tphys_from_auni(CosmologyParameters *c, double a) nogil
+
+    double auni_from_abox(CosmologyParameters *c, double v) nogil
+    double auni_from_tcode(CosmologyParameters *c, double v) nogil
+    double auni_from_tphys(CosmologyParameters *c, double v) nogil
+
+    double abox_from_tcode(CosmologyParameters *c, double tcode) nogil
+    double tcode_from_abox(CosmologyParameters *c, double abox) nogil
+
+    double tphys_from_abox(CosmologyParameters *c, double abox) nogil
+    double tphys_from_tcode(CosmologyParameters *c, double tcode) nogil
+
 cdef extern from "artio.h":
     ctypedef struct artio_fileset_handle "artio_fileset" :
         pass
@@ -138,6 +165,10 @@
     cdef public object parameters 
     cdef artio_fileset_handle *handle
 
+    # cosmology parameter for time unit conversion
+    cdef CosmologyParameters *cosmology
+    cdef float tcode_to_years
+
     # common attributes
     cdef public int num_grid
     cdef int64_t num_root_cells
@@ -178,6 +209,19 @@
         self.sfc_min = 0
         self.sfc_max = self.num_root_cells-1
 
+        # initialize cosmology module
+        if self.parameters.has_key("abox"):
+            self.cosmology = cosmology_allocate()
+            cosmology_set_OmegaM(self.cosmology, self.parameters['OmegaM'][0])
+            cosmology_set_OmegaL(self.cosmology, self.parameters['OmegaL'][0])
+            cosmology_set_OmegaB(self.cosmology, self.parameters['OmegaB'][0])
+            cosmology_set_h(self.cosmology, self.parameters['hubble'][0])
+            cosmology_set_DeltaDC(self.cosmology, self.parameters['DeltaDC'][0])
+            cosmology_set_fixed(self.cosmology)
+        else:
+            self.cosmology = NULL
+            self.tcode_to_years = self.parameters['time_unit'][0]/(365.25*86400)
+
         # grid detection
         self.min_level = 0
         self.max_level = self.parameters['grid_max_level'][0]
@@ -239,6 +283,8 @@
         if self.primary_variables : free(self.primary_variables)
         if self.secondary_variables : free(self.secondary_variables)
 
+        if self.cosmology : cosmology_free(self.cosmology)
+
         if self.handle : artio_fileset_close(self.handle)
   
     def read_parameters(self) :
@@ -288,6 +334,100 @@
 
             self.parameters[key] = parameter
 
+    def abox_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return abox_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("abox_from_auni called for non-cosmological ARTIO fileset!")
+
+    def tcode_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return tcode_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tcode_from_auni called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return tphys_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tphys_from_auni called for non-cosmological ARTIO fileset!")
+
+    def auni_from_abox(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_abox(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_abox called for non-cosmological ARTIO fileset!")
+
+    def auni_from_tcode(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_tcode(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tcode called for non-cosmological ARTIO fileset!")
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def auni_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, nauni
+        cdef np.ndarray[np.float64_t, ndim=1] auni
+        cdef CosmologyParameters *cosmology = self.cosmology
+        if not cosmology:
+            raise RuntimeError("auni_from_tcode_array called for non-cosmological ARTIO fileset!")
+        auni = np.empty_like(tcode)
+        nauni = auni.shape[0]
+        with nogil:
+            for i in range(nauni):
+                auni[i] = auni_from_tcode(self.cosmology, tcode[i])
+        return auni
+
+    def auni_from_tphys(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_tphys(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tphys called for non-cosmological ARTIO fileset!")
+
+    def abox_from_tcode(self, np.float64_t abox):
+        if self.cosmology:
+            return abox_from_tcode(self.cosmology, abox)
+        else:
+            raise RuntimeError("abox_from_tcode called for non-cosmological ARTIO fileset!")
+
+    def tcode_from_abox(self, np.float64_t abox):
+        if self.cosmology:
+            return tcode_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tcode_from_abox called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_abox(self, np.float64_t abox):
+        if self.cosmology:
+            return tphys_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tphys_from_abox called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_tcode(self, np.float64_t tcode):
+        if self.cosmology:
+            return tphys_from_tcode(self.cosmology, tcode)
+        else:
+            return self.tcode_to_years*tcode
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def tphys_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, ntphys
+        cdef np.ndarray[np.float64_t, ndim=1] tphys
+        cdef CosmologyParameters *cosmology = self.cosmology
+        tphys = np.empty_like(tcode)
+        ntphys = tcode.shape[0]
+
+        if cosmology:
+            tphys = np.empty_like(tcode)
+            ntphys = tcode.shape[0]
+            with nogil:
+                for i in range(ntphys):
+                    tphys[i] = tphys_from_tcode(cosmology, tcode[i])
+            return tphys
+        else:
+            return tcode*self.tcode_to_years
+
 #    @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

diff -r 66661ac7b4208e71cd9b3ce97f36c1bdc06f5deb -r 5130e0892eb0cda34d5bff8a94e424d737e1518d yt/frontends/artio/artio_headers/cosmology.c
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -0,0 +1,524 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+
+#ifndef ERROR
+#include <stdio.h>
+#define ERROR(msg) { fprintf(stderr,"%s\n",msg); exit(1); }
+#endif
+
+#ifndef ASSERT
+#include <stdio.h>
+#define ASSERT(exp) { if(!(exp)) { fprintf(stderr,"Failed assertion %s, line: %d\n",#exp,__LINE__); exit(1); } }
+#endif
+
+#ifndef HEAPALLOC
+#include <stdlib.h>
+#define HEAPALLOC(type,size)	(type *)malloc((size)*sizeof(type))
+#endif
+
+#ifndef NEWARR
+#include <stdlib.h>
+#define NEWARR(size)   HEAPALLOC(double,size)
+#endif
+
+#ifndef DELETE
+#include <stdlib.h>
+#define DELETE(ptr)    free(ptr)
+#endif
+
+
+#include "cosmology.h"
+
+
+struct CosmologyParametersStruct
+{
+  int set;
+  int ndex;
+  int size;
+  double *la;
+  double *aUni;
+  double *aBox;
+  double *tCode;
+  double *tPhys;
+  double *dPlus;
+  double *qPlus;
+  double aLow;
+  double tCodeOffset;
+
+  double OmegaM;
+  double OmegaD;
+  double OmegaB;
+  double OmegaL;
+  double OmegaK;
+  double OmegaR;
+  double h;
+  double DeltaDC;
+  int flat;
+  double Omh2;
+  double Obh2;
+};
+
+void cosmology_clear_table(CosmologyParameters *c);
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax);
+void cosmology_fill_table_abox(CosmologyParameters *c, int istart, int n);
+
+CosmologyParameters *cosmology_allocate() {
+	CosmologyParameters *c = HEAPALLOC(CosmologyParameters,1);
+	if ( c != NULL ) {
+		memset(c, 0, sizeof(CosmologyParameters));
+
+		c->ndex = 200;
+		c->aLow = 1.0e-2;
+	}
+	return c;
+}
+
+void cosmology_free(CosmologyParameters *c) {
+	cosmology_clear_table(c);
+	DELETE(c);
+}
+
+int cosmology_is_set(CosmologyParameters *c)
+{
+  return (c->OmegaM>0.0 && c->OmegaB>0.0 && c->h>0.0);
+}
+
+
+void cosmology_fail_on_reset(const char *name, double old_value, double new_value)
+{
+  char str[150];
+  sprintf(str,"Trying to change %s from %lg to %lg...\nCosmology has been fixed and cannot be changed.\n",name,old_value,new_value);
+  ERROR(str);
+}
+
+
+void cosmology_set_OmegaM(CosmologyParameters *c, double v)
+{
+  if(v < 1.0e-3) v = 1.0e-3;
+  if(fabs(c->OmegaM-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaM",c->OmegaM,v);
+      c->OmegaM = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaL(CosmologyParameters *c, double v)
+{
+  if(fabs(c->OmegaL-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaL",c->OmegaL,v);
+      c->OmegaL = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaB(CosmologyParameters *c, double v)
+{
+  if(v < 0.0) v = 0.0;
+  if(fabs(c->OmegaB-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaB",c->OmegaB,v);
+      c->OmegaB = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_h(CosmologyParameters *c, double v)
+{
+  if(fabs(c->h-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("h",c->h,v);
+      c->h = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_DeltaDC(CosmologyParameters *c, double v)
+{
+  if(fabs(c->DeltaDC-v) > 1.0e-3)
+    {
+      if(c->set) cosmology_fail_on_reset("DeltaDC",c->DeltaDC,v);
+      c->DeltaDC = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_init(CosmologyParameters *c)
+{
+  if(c->size == 0) /* reset only if the state is dirty */
+    {
+      if(!cosmology_is_set(c)) ERROR("Not all of the required cosmological parameters have been set; the minimum required set is (OmegaM,OmegaB,h).");
+
+      if(c->OmegaB > c->OmegaM) c->OmegaB = c->OmegaM;
+      c->OmegaD = c->OmegaM - c->OmegaB;
+      if(c->flat)
+	{
+	  c->OmegaK = 0.0;
+	  c->OmegaL = 1.0 - c->OmegaM;
+	}
+      else
+	{
+	  c->OmegaK = 1.0 - (c->OmegaM+c->OmegaL);
+	}
+      c->OmegaR = 4.166e-5/(c->h*c->h);
+
+      c->Omh2 = c->OmegaM*c->h*c->h;
+      c->Obh2 = c->OmegaB*c->h*c->h;
+
+      cosmology_fill_table(c,c->aLow,1.0);
+
+      c->tCodeOffset = 0.0;  /*  Do need to set it to zero first */
+#ifndef NATIVE_TCODE_NORMALIZATION
+      c->tCodeOffset = 0.0 - tCode(c,inv_aBox(c,1.0));
+#endif
+    }      
+}
+
+
+void cosmology_set_fixed(CosmologyParameters *c)
+{
+  cosmology_init(c);
+  c->set = 1;
+}
+
+
+double cosmology_mu(CosmologyParameters *c, double a)
+{
+  return sqrt(((a*a*c->OmegaL+c->OmegaK)*a+c->OmegaM)*a+c->OmegaR);
+}
+
+
+double cosmology_dc_factor(CosmologyParameters *c, double dPlus)
+{
+  double dc = 1.0 + dPlus*c->DeltaDC;
+  return 1.0/pow((dc>0.001)?dc:0.001,1.0/3.0);
+}
+
+
+void cosmology_fill_table_integrate(CosmologyParameters *c, double a, double y[], double f[])
+{
+  double mu = cosmology_mu(c, a);
+  double abox = a*cosmology_dc_factor(c, y[2]);
+  
+  f[0] = a/(abox*abox*mu);
+  f[1] = a/mu;
+  f[2] = y[3]/(a*mu);
+  f[3] = 1.5*c->OmegaM*y[2]/mu;
+}
+
+
+void cosmology_fill_table_piece(CosmologyParameters *c, int istart, int n)
+{
+  int i, j;
+  double tPhysUnit = (3.0856775813e17/(365.25*86400))/c->h;  /* 1/H0 in Julian years */
+
+  double x, aeq = c->OmegaR/c->OmegaM;
+  double tCodeFac = 1.0/sqrt(aeq);
+  double tPhysFac = tPhysUnit*aeq*sqrt(aeq)/sqrt(c->OmegaM);
+
+  double da, a0, y0[4], y1[4];
+  double f1[4], f2[4], f3[4], f4[4];
+
+  for(i=istart; i<n; i++)
+    {
+      c->aUni[i] = pow(10.0,c->la[i]);
+    }
+
+  /*
+  //  Small a regime, use analytical formulae for matter + radiation model
+  */  
+  for(i=istart; c->aUni[i]<(c->aLow+1.0e-9) && i<n; i++)
+    {
+      x = c->aUni[i]/aeq;
+
+      c->tPhys[i] = tPhysFac*2*x*x*(2+sqrt(x+1))/(3*pow(1+sqrt(x+1),2.0));
+      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after equality; it is very small for x > 10, I keep it just for completeness; */
+      c->qPlus[i] = c->aUni[i]*cosmology_mu(c,c->aUni[i])*(1 + ((2+6*x)/(x*sqrt(1+x))+3*log(x)-6*log(1+sqrt(1+x)))/(log(64)-9)); /* this is a^2*dDPlus/dt/H0 */
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+      c->tCode[i] = 1.0 - tCodeFac*asinh(sqrt(aeq/c->aBox[i]));
+    }
+  
+  /*
+  //  Large a regime, solve ODEs
+  */
+  ASSERT(i > 0);
+
+  tCodeFac = 0.5*sqrt(c->OmegaM);
+  tPhysFac = tPhysUnit;
+
+  y1[0] = c->tCode[i-1]/tCodeFac;
+  y1[1] = c->tPhys[i-1]/tPhysFac;
+  y1[2] = c->dPlus[i-1];
+  y1[3] = c->qPlus[i-1];
+
+  for(; i<n; i++)
+    {
+      a0 = c->aUni[i-1];
+      da = c->aUni[i] - a0;
+
+      /*  RK4 integration */
+      for(j=0; j<4; j++) y0[j] = y1[j];
+      cosmology_fill_table_integrate(c, a0,y1,f1);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f1[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f2);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f2[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f3);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*f3[j];
+      cosmology_fill_table_integrate(c, a0+da,y1,f4);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*(f1[j]+2*f2[j]+2*f3[j]+f4[j])/6.0;
+
+      c->tCode[i] = tCodeFac*y1[0];
+      c->tPhys[i] = tPhysFac*y1[1];
+      c->dPlus[i] = y1[2];
+      c->qPlus[i] = y1[3];
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+    }
+} 
+
+
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax)
+{
+  int i, imin, imax, iold;
+  double dla = 1.0/c->ndex;
+  double lamin, lamax;
+  double *old_la = c->la;
+  double *old_aUni = c->aUni;
+  double *old_aBox = c->aBox;
+  double *old_tCode = c->tCode;
+  double *old_tPhys = c->tPhys;
+  double *old_dPlus = c->dPlus;
+  double *old_qPlus = c->qPlus;
+  int old_size = c->size;
+
+  if(amin > c->aLow) amin = c->aLow;
+  lamin = dla*floor(c->ndex*log10(amin));
+  lamax = dla*ceil(c->ndex*log10(amax));
+
+  c->size = 1 + (int)(0.5+c->ndex*(lamax-lamin)); 
+  ASSERT(fabs(lamax-lamin-dla*(c->size-1)) < 1.0e-14);
+
+  c->la = NEWARR(c->size);     ASSERT(c->la != NULL);
+  c->aUni = NEWARR(c->size);   ASSERT(c->aUni != NULL);
+  c->aBox = NEWARR(c->size);   ASSERT(c->aBox != NULL);
+  c->tCode = NEWARR(c->size);  ASSERT(c->tCode != NULL);
+  c->tPhys = NEWARR(c->size);  ASSERT(c->tPhys != NULL);
+  c->dPlus = NEWARR(c->size);  ASSERT(c->dPlus != NULL);
+  c->qPlus = NEWARR(c->size);  ASSERT(c->qPlus != NULL);
+
+  /*
+  //  New log10(aUni) table
+  */
+  for(i=0; i<c->size; i++)
+    {
+      c->la[i] = lamin + dla*i;
+    }
+
+  if(old_size == 0)
+    {
+      /*
+      //  Filling the table for the first time
+      */
+      cosmology_fill_table_piece(c,0,c->size);
+    }
+  else
+    {
+      /*
+      //  Find if we need to expand the lower end
+      */
+      if(lamin < old_la[0])
+	{
+	  imin = (int)(0.5+c->ndex*(old_la[0]-lamin));
+	  ASSERT(fabs(old_la[0]-lamin-dla*imin) < 1.0e-14);
+	}
+      else imin = 0;
+
+      /*
+      //  Find if we need to expand the upper end
+      */
+      if(lamax > old_la[old_size-1])
+	{
+	  imax = (int)(0.5+c->ndex*(old_la[old_size-1]-lamin));
+	  ASSERT(fabs(old_la[old_size-1]-lamin-dla*imax) < 1.0e-14);
+	}
+      else imax = c->size - 1;
+  
+      /*
+      //  Re-use the rest
+      */
+      if(lamin > old_la[0])
+	{
+	  iold = (int)(0.5+c->ndex*(lamin-old_la[0]));
+	  ASSERT(fabs(lamin-old_la[0]-dla*iold) < 1.0e-14);
+	}
+      else iold = 0;
+
+      memcpy(c->aUni+imin,old_aUni+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->aBox+imin,old_aBox+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tCode+imin,old_tCode+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tPhys+imin,old_tPhys+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->dPlus+imin,old_dPlus+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->qPlus+imin,old_qPlus+iold,sizeof(double)*(imax-imin+1));
+
+      DELETE(old_la);
+      DELETE(old_aUni);
+      DELETE(old_aBox);
+      DELETE(old_tCode);
+      DELETE(old_tPhys);
+      DELETE(old_dPlus);
+      DELETE(old_qPlus);
+
+      /*
+      //  Fill in additional pieces
+      */
+      if(imin > 0) cosmology_fill_table_piece(c,0,imin);
+      if(imax < c->size-1) cosmology_fill_table_piece(c,imax,c->size);
+    }
+}
+
+
+void cosmology_clear_table(CosmologyParameters *c)
+{
+  if(c->size > 0)
+    {
+      DELETE(c->la);
+      DELETE(c->aUni);
+      DELETE(c->aBox);
+      DELETE(c->tCode);
+      DELETE(c->tPhys);
+      DELETE(c->dPlus);
+      DELETE(c->qPlus);
+
+      c->size = 0;
+      c->la = NULL;
+      c->aUni = NULL;
+      c->aBox = NULL;
+      c->tCode = NULL;
+      c->tPhys = NULL;
+      c->dPlus = NULL;
+      c->qPlus = NULL;
+    }
+}
+
+
+void cosmology_check_range(CosmologyParameters *c, double a)
+{
+  ASSERT((a > 1.0e-9) && (a < 1.0e9));
+
+  if(c->size == 0) cosmology_init(c);
+
+  if(a < c->aUni[0])
+    {
+      cosmology_fill_table(c,a,c->aUni[c->size-1]);
+    }
+
+  if(a > c->aUni[c->size-1])
+    {
+      cosmology_fill_table(c,c->aUni[0],a);
+    }
+}
+
+
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax)
+{
+  cosmology_check_range(c, amin);
+  cosmology_check_range(c, amax);
+}
+
+
+double cosmology_get_value_from_table(CosmologyParameters *c, double a, double table[])
+{
+  int idx = (int)(c->ndex*(log10(a)-c->la[0]));
+
+  ASSERT(idx>=0 && idx<c->size);
+
+  /*
+  //  Do it as a function of aUni rather than la to ensure exact inversion
+  */
+  return table[idx] + (table[idx+1]-table[idx])/(c->aUni[idx+1]-c->aUni[idx])*(a-c->aUni[idx]);
+}
+
+
+int cosmology_find_index(CosmologyParameters *c, double v, double table[])
+{
+  int ic, il = 0;
+  int ih = c->size - 1;
+
+  if(v < table[0])
+    {
+      return -1;
+    }
+  if(v > table[c->size-1])
+    {
+      return c->size + 1;
+    }
+
+  while((ih-il) > 1)
+    {
+      ic = (il+ih)/2;
+      if(v > table[ic]) /* special, not fully optimal form to avoid checking that il < c->size-1 */
+	il = ic;
+      else
+	ih = ic;
+    }
+
+  ASSERT(il+1 < c->size);
+
+  return il;
+}
+
+
+/*
+//  Direct and inverse functions
+*/
+#define DEFINE_FUN(name,offset)			\
+double name(CosmologyParameters *c, double a) \
+{ \
+  cosmology_check_range(c,a); \
+  return cosmology_get_value_from_table(c,a,c->name) + offset; \
+} \
+double inv_##name(CosmologyParameters *c, double v) \
+{ \
+  int idx; \
+  double *table; \
+  if(c->size == 0) cosmology_init(c); \
+  v -= offset; \
+  table = c->name; \
+  idx = cosmology_find_index(c,v,table); \
+  while(idx < 0) \
+    { \
+      cosmology_check_range(c,0.5*c->aUni[0]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  while(idx > c->size) \
+    { \
+      cosmology_check_range(c,2.0*c->aUni[c->size-1]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  return c->aUni[idx] + (c->aUni[idx+1]-c->aUni[idx])/(table[idx+1]-table[idx])*(v-table[idx]); \
+}
+
+DEFINE_FUN(aBox,0.0);
+DEFINE_FUN(tCode,c->tCodeOffset);
+DEFINE_FUN(tPhys,0.0);
+DEFINE_FUN(dPlus,0.0);
+DEFINE_FUN(qPlus,0.0);
+
+#undef DEFINE_FUN
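
The large-a branch of cosmology_fill_table_piece above advances the four tabulated quantities with a classical fourth-order Runge-Kutta step. For reference, the same scheme for dy/da = f(a, y) in standalone Python (illustrative only, not part of the patch):

    def rk4_step(f, a0, y0, da):
        # Four slope evaluations combined with weights 1, 2, 2, 1.
        k1 = f(a0, y0)
        k2 = f(a0 + 0.5 * da, [y + 0.5 * da * k for y, k in zip(y0, k1)])
        k3 = f(a0 + 0.5 * da, [y + 0.5 * da * k for y, k in zip(y0, k2)])
        k4 = f(a0 + da,       [y + da * k for y, k in zip(y0, k3)])
        return [y + da * (p + 2*q + 2*r + s) / 6.0
                for y, p, q, r, s in zip(y0, k1, k2, k3, k4)]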

diff -r 66661ac7b4208e71cd9b3ce97f36c1bdc06f5deb -r 5130e0892eb0cda34d5bff8a94e424d737e1518d yt/frontends/artio/artio_headers/cosmology.h
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.h
@@ -0,0 +1,101 @@
+#ifndef __COSMOLOGY_H__
+#define __COSMOLOGY_H__
+
+typedef struct CosmologyParametersStruct CosmologyParameters;
+
+#define COSMOLOGY_DECLARE_PRIMARY_PARAMETER(name) \
+void cosmology_set_##name(CosmologyParameters *c, double value)
+
+#define cosmology_set(c,name,value)	\
+cosmology_set_##name(c,value)
+
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaM);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaB);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaL);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(h);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(DeltaDC);
+
+#undef COSMOLOGY_DECLARE_PRIMARY_PARAMETER
+
+CosmologyParameters *cosmology_allocate();
+void cosmology_free(CosmologyParameters *c);
+
+/*
+//  Check that all required cosmological parameters have been set.
+//  The minimum set is OmegaM, OmegaB, and h. By default, zero OmegaL,
+//  OmegaK, and the DC mode are assumed.
+*/
+int cosmology_is_set(CosmologyParameters *c);
+
+
+/*
+//  Freeze the cosmology and forbid any further changes to it.
+//  In codes that include user-customizable segments (like plugins),
+//  this function can be used for ensuring that a user does not
+//  change the cosmology in mid-run.
+*/
+void cosmology_set_fixed(CosmologyParameters *c);
+
+
+/*
+//  Manual initialization. This does not need to be called, 
+//  the initialization is done automatically on the first call
+//  to a relevant function.
+*/
+void cosmology_init(CosmologyParameters *c);
+
+
+/*
+//  Set the range of global scale factors for thread-safe
+//  calls to direct functions until the argument leaves the range.
+*/
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax);
+
+/*
+//  Direct functions take the global cosmological scale factor as the argument.
+//  These functions are thread-safe if called with the argument in the
+//  range set by a prior call to cosmology_set_thread_safe_range(...).
+//  Calling them with the argument outside that range is ok, but breaks
+//  thread-safety assurance.
+*/
+
+#define DEFINE_FUN(name)         \
+double name(CosmologyParameters *c, double a); \
+double inv_##name(CosmologyParameters *c, double v);
+
+DEFINE_FUN(aBox);
+DEFINE_FUN(tCode);
+DEFINE_FUN(tPhys);
+DEFINE_FUN(dPlus);
+DEFINE_FUN(qPlus); /* Q+ = a^2 dD+/(H0 dt) */
+
+#undef DEFINE_FUN
+
+/*
+//  Conversion macros
+*/
+#define  abox_from_auni(c,a)   aBox(c,a)
+#define tcode_from_auni(c,a)  tCode(c,a)
+#define tphys_from_auni(c,a)  tPhys(c,a)
+#define dplus_from_auni(c,a)  dPlus(c,a)
+
+#define auni_from_abox(c,v)   inv_aBox(c,v)
+#define auni_from_tcode(c,v)  inv_tCode(c,v)
+#define auni_from_tphys(c,v)  inv_tPhys(c,v)
+#define auni_from_dplus(c,v)  inv_dPlus(c,v)
+
+#define abox_from_tcode(c,tcode)   aBox(c,inv_tCode(c,tcode))
+#define tcode_from_abox(c,abox)    tCode(c,inv_aBox(c,abox))
+
+#define tphys_from_abox(c,abox)    tPhys(c,inv_aBox(c,abox))
+#define tphys_from_tcode(c,tcode)  tPhys(c,inv_tCode(c,tcode))
+#define dplus_from_tcode(c,tcode)  dPlus(c,inv_tCode(c,tcode))
+
+/*
+//  Hubble parameter in km/s/Mpc; defined as a macro so that it can be
+//  undefined if needed to avoid the name clash.
+*/
+double cosmology_mu(CosmologyParameters *c, double a);
+#define Hubble(c,a) (100*c->h*cosmology_mu(c,a)/(a*a))
+
+#endif /* __COSMOLOGY_H__ */

diff -r 66661ac7b4208e71cd9b3ce97f36c1bdc06f5deb -r 5130e0892eb0cda34d5bff8a94e424d737e1518d yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -26,8 +26,7 @@
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
-    ARTIOFieldInfo, \
-    b2t
+    ARTIOFieldInfo
 from yt.fields.particle_fields import \
     standard_particle_fields
 
@@ -387,37 +386,40 @@
             self.particle_types = ()
         self.particle_types_raw = self.particle_types
 
-        self.current_time = b2t(self.artio_parameters["tl"][0])
+        self.current_time = self.quan(self._handle.tphys_from_tcode(self.artio_parameters["tl"][0]),"yr")
 
         # detect cosmology
         if "abox" in self.artio_parameters:
+            self.cosmological_simulation = True
+
             abox = self.artio_parameters["abox"][0]
-            self.cosmological_simulation = True
             self.omega_lambda = self.artio_parameters["OmegaL"][0]
             self.omega_matter = self.artio_parameters["OmegaM"][0]
             self.hubble_constant = self.artio_parameters["hubble"][0]
-            self.current_redshift = 1.0/self.artio_parameters["abox"][0] - 1.0
+            self.current_redshift = 1.0/self.artio_parameters["auni"][0] - 1.0
+            self.current_redshift_box = 1.0/abox - 1.0
 
             self.parameters["initial_redshift"] =\
                 1.0 / self.artio_parameters["auni_init"][0] - 1.0
             self.parameters["CosmologyInitialRedshift"] =\
                 self.parameters["initial_redshift"]
-        else:
-            self.cosmological_simulation = False
 
-        #units
-        if self.cosmological_simulation:
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
             self.parameters['unit_t'] =\
                 self.artio_parameters["time_unit"][0] * abox**2
             self.parameters['unit_l'] =\
                 self.artio_parameters["length_unit"][0] * abox
+
+            if self.artio_parameters["DeltaDC"][0] != 0:
+                mylog.warn("DeltaDC != 0, which implies auni != abox.  Be sure you understand which expansion parameter is appropriate for your use! (Gnedin, Kravtsov, & Rudd 2011)")
         else:
+            self.cosmological_simulation = False
+
             self.parameters['unit_l'] = self.artio_parameters["length_unit"][0]
             self.parameters['unit_t'] = self.artio_parameters["time_unit"][0]
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
 
-        # hard coded assumption of 3D periodicity (add to dataset)
+        # hard coded assumption of 3D periodicity
         self.periodicity = (True, True, True)
 
     @classmethod

diff -r 66661ac7b4208e71cd9b3ce97f36c1bdc06f5deb -r 5130e0892eb0cda34d5bff8a94e424d737e1518d yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -19,6 +19,8 @@
 from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from yt.fields.field_detector import \
+    FieldDetector
 from yt.units.yt_array import \
     YTArray
 
@@ -62,7 +64,8 @@
         ("MASS", ("code_mass", ["particle_mass"], None)),
         ("PID", ("", ["particle_index"], None)),
         ("SPECIES", ("", ["particle_type"], None)),
-        ("BIRTH_TIME", ("code_time", ["creation_time"], None)),
+        ("BIRTH_TIME", ("", [], None)),  # code-units defined as dimensionless to 
+                                         # avoid incorrect conversion 
         ("INITIAL_MASS", ("code_mass", ["initial_mass"], None)),
         ("METALLICITY_SNIa", ("", ["metallicity_snia"], None)),
         ("METALLICITY_SNII", ("", ["metallicity_snii"], None)),
@@ -111,123 +114,35 @@
                        take_log=True)
 
     def setup_particle_fields(self, ptype):
+        if ptype == "STAR":
+            def _creation_time(field,data):
+                # this test is necessary to avoid passing invalid tcode values 
+                # to the function tphys_from_tcode during field detection
+                # (1.0 is not a valid tcode value)
+                if isinstance(data, FieldDetector):
+                    return data["STAR","BIRTH_TIME"]
+                return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
-        def _particle_age(field, data):
-            return b2t(data[ptype,"creation_time"])
-        self.add_field((ptype, "particle_age"), function=_particle_age, units="s",
-                  particle_type=True)
+            def _age(field, data):
+                if isinstance(data, FieldDetector):
+                    return data["STAR","creation_time"]
+                return data.ds.current_time - data["STAR","creation_time"]
+
+            self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
+                        particle_type=True)
+            self.add_field((ptype, "age"), function=_age, units="yr",
+                        particle_type=True)
+
+            if self.ds.cosmological_simulation:
+                def _creation_redshift(field,data):
+                    # this test is necessary to avoid passing invalid tcode values 
+                    # to the function auni_from_tcode during field detection
+                    # (1.0 is not a valid tcode value)
+                    if isinstance(data, FieldDetector):
+                        return data["STAR","BIRTH_TIME"]
+                    return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0
+
+                self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
+                        particle_type=True)
 
         super(ARTIOFieldInfo, self).setup_particle_fields(ptype)
-
-#stolen from frontends/art/
-#All of these functions are to convert from hydro time var to
-#proper time
-sqrt = np.sqrt
-sign = np.sign
-
-
-def find_root(f, a, b, tol=1e-6):
-    c = (a+b)/2.0
-    last = -np.inf
-    assert(sign(f(a)) != sign(f(b)))
-    while np.abs(f(c)-last) > tol:
-        last = f(c)
-        if sign(last) == sign(f(b)):
-            b = c
-        else:
-            a = c
-        c = (a+b)/2.0
-    return c
-
-
-def quad(fintegrand, xmin, xmax, n=1e4):
-    spacings = np.logspace(np.log10(xmin), np.log10(xmax), n)
-    integrand_arr = fintegrand(spacings)
-    val = np.trapz(integrand_arr, dx=np.diff(spacings))
-    return val
-
-
-def a2b(at, Om0=0.27, Oml0=0.73, h=0.700):
-    def f_a2b(x):
-        val = 0.5*sqrt(Om0) / x**3.0
-        val /= sqrt(Om0/x**3.0 + Oml0 + (1.0-Om0-Oml0)/x**2.0)
-        return val
-    #val, err = si.quad(f_a2b,1,at)
-    val = quad(f_a2b, 1, at)
-    return val
-
-
-def b2a(bt, **kwargs):
-    #converts code time into expansion factor
-    #if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2
-    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
-    f_b2a = lambda at: a2b(at, **kwargs)-bt
-    return find_root(f_b2a, 1e-4, 1.1)
-    #return so.brenth(f_b2a,1e-4,1.1)
-    #return brent.brent(f_b2a)
-
-
-def a2t(at, Om0=0.27, Oml0=0.73, h=0.700):
-    integrand = lambda x: 1./(x*sqrt(Oml0+Om0*x**-3.0))
-    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
-    current_time = quad(integrand, 1e-4, at)
-    #spacings = np.logspace(-5,np.log10(at),1e5)
-    #integrand_arr = integrand(spacings)
-    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-    current_time *= 9.779/h
-    return current_time
-
-
-def b2t(tb, n=1e2, logger=None, **kwargs):
-    tb = np.array(tb)
-    if len(np.atleast_1d(tb)) == 1: 
-        return a2t(b2a(tb))
-    if tb.shape == ():
-        return None 
-    if len(tb) < n:
-        n = len(tb)
-    age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
-    age_max = a2t(b2a(tb.min(), **kwargs), **kwargs)
-    tbs = -1.*np.logspace(np.log10(-tb.min()),
-                          np.log10(-tb.max()), n)
-    ages = []
-    for i, tbi in enumerate(tbs):
-        ages += a2t(b2a(tbi)),
-        if logger:
-            logger(i)
-    ages = np.array(ages)
-    fb2t = np.interp(tb, tbs, ages)
-    #fb2t = interp1d(tbs,ages)
-    return fb2t*1e9*31556926
-
-
-def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):
-    #stars are formed in lumps; spread out the ages linearly
-    da = np.diff(ages)
-    assert np.all(da <= 0)
-    #ages should always be decreasing, and ordered so
-    agesd = np.zeros(ages.shape)
-    idx, = np.where(da < 0)
-    idx += 1
-    #mark the right edges
-    #spread this age evenly out to the next age
-    lidx = 0
-    lage = 0
-    for i in idx:
-        n = i-lidx
-        #n stars affected
-        rage = ages[i]
-        lage = max(rage-spread, 0.0)
-        agesd[lidx:i] = np.linspace(lage, rage, n)
-        lidx = i
-        #lage=rage
-        if logger:
-            logger(i)
-    #we didn't get the last iter
-    i = ages.shape[0]-1
-    n = i-lidx
-    #n stars affected
-    rage = ages[i]
-    lage = max(rage-spread, 0.0)
-    agesd[lidx:i] = np.linspace(lage, rage, n)
-    return agesd

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


