[yt-svn] commit/yt: 49 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Nov 27 02:18:20 PST 2012


49 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/8ffecc43df6a/
changeset:   8ffecc43df6a
branch:      yt
user:        Christopher Moody
date:        2012-08-16 20:30:01
summary:     Added alternative stereo pair camera (still in progress)
affected #:  1 file

diff -r 7c5ad85490e8ade384a165f1af51e1ef7cd9f692 -r 8ffecc43df6a0c74a25fb9fcf812282a104b4eeb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -903,12 +903,87 @@
         return info, values
 
 class StereoPairCamera(Camera):
-    def __init__(self, original_camera, relative_separation = 0.005):
+    def __init__(self,original_camera,
+                 auto_focus=False,
+                 focal_length=None,
+                 frac_near_plane = 0.90, 
+                 frac_far_plane  = 1.10,
+                 frac_eye_separation=0.05,
+                 aperture = 60.0,
+                 relative_separation=0.005):
+        """
+        Auto-focus is adapted from the guide and code at:
+        http://paulbourke.net/miscellaneous/stereographics/stereorender/
+        """
         ParallelAnalysisInterface.__init__(self)
         self.original_camera = original_camera
-        self.relative_separation = relative_separation
+        oc = self.original_camera
+        if self.auto_focus:
+            dist = lambda x,y: na.sqrt(na.sum((x-y)**2.0))
+            if self.focal_length is None:
+                self.focal_length = dist(oc.normal_vector,0.0)
+            self.focal_far  = oc.center + frac_far_plane*oc.normal_vector
+            self.focal_near = oc.center + frac_near_plane*oc.normal_vector
+            self.wh_ratio = oc.resolution[0]/oc.resolution[1]
+            self.eye_sep  = self.focal_length*frac_eye_separation
+            self.aperture = aperture
+            self.frac_eye_separation = frac_eye_separation
+            self.center_eye_pos = oc.center + oc.normal_vector
+        else:
+            #default to old separation
+            self.relative_separation = relative_separation
+    
+    def finalize_image(self,image):
+        if self.auto_focus:
+            #we have extra frustum pixels on the left and right
+            #cameras
+            left_trim,right_trim = self.trim[0],self.trim[1]
+            left = abs(left_trim)
+            right = image.shape[0]-abs(right_trim)
+            image = image[left:right,:]
+            return image
+
+    def auto_split(self):
+        """We must calculate the new camera centers, as well
+        as the extended frustum pixels."""
+        oc = self.original_camera
+        nv = oc.orienter.normal_vector
+        up = oc.north_vector
+        c = oc.center
+        px = oc.resolution[0] #pixel width
+        norm = lambda x: na.sqrt(na.dot(x,x.conj()))
+        between_eyes = na.cross(nv,up)
+        between_eyes /= norm(between_eyes)
+        between_eyes *= self.eye_sep/2.0
+        le_norm = nv-between_eyes 
+        le_c= c-between_eyes 
+        re_norm = nv+between_eyes 
+        re_c = c+between_eyes 
+        angular_aperture = na.tan(self.aperture/360.0*2.0*na.pi/2.0)
+        delta = na.rint(px*self.frac_eye_separation/(2.0*(angular_aperture)))
+        delta = delta.astype('int')
+        eresolution = px+delta
+        left_camera = Camera(le_c, le_norm, oc.width,
+                             eresolution, oc.transfer_function, north_vector=up,
+                             volume=oc.volume, fields=oc.fields, 
+                             log_fields=oc.log_fields,
+                             sub_samples=oc.sub_samples, pf=oc.pf)
+        left_camera.trim = [-delta,0]
+        right_camera = Camera(re_c, re_norm, oc.width,
+                             eresolution, oc.transfer_function, north_vector=up,
+                             volume=oc.volume, fields=oc.fields, 
+                             log_fields=oc.log_fields,
+                             sub_samples=oc.sub_samples, pf=oc.pf)
+        right_camera.trim = [0,-delta]
+        return (left_camera, right_camera)
 
     def split(self):
+        if self.auto_focus:
+            return self.auto_split()
+        else:
+            return self.default_split()
+    
+    def default_split(self):
         oc = self.original_camera
         uv = oc.orienter.unit_vectors
         c = oc.center
@@ -926,6 +1001,10 @@
                              sub_samples=oc.sub_samples, pf=oc.pf)
         return (left_camera, right_camera)
 
+
+
+        
+
 class FisheyeCamera(Camera):
     def __init__(self, center, radius, fov, resolution,
                  transfer_function = None, fields = None,
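
The auto-focus code above displaces the two eye positions along the axis
perpendicular to the view direction and the up vector, widens each eye's
frustum by a pixel count derived from the fractional eye separation and the
aperture, and later trims those extra pixels in finalize_image. A minimal
numpy sketch of that geometry, following Paul Bourke's off-axis stereo guide;
the helper names (make_stereo_centers, frustum_pixel_offset) are illustrative
and not part of yt's API:

import numpy as np

def make_stereo_centers(center, normal, up, focal_length,
                        frac_eye_separation=0.05):
    """Return the left/right eye centers for an off-axis stereo pair."""
    center = np.asarray(center, dtype="float64")
    normal = np.asarray(normal, dtype="float64")
    up = np.asarray(up, dtype="float64")
    # the eyes sit on the axis perpendicular to both the view direction
    # and the up vector, half an eye separation to each side
    between = np.cross(normal, up)
    between /= np.sqrt(np.dot(between, between))
    half_sep = 0.5 * focal_length * frac_eye_separation
    return center - half_sep * between, center + half_sep * between

def frustum_pixel_offset(nx, frac_eye_separation, aperture_deg=60.0):
    """Extra horizontal pixels each eye renders before trimming."""
    half_angle = np.tan(np.radians(aperture_deg) / 2.0)
    return int(np.rint(nx * frac_eye_separation / (2.0 * half_angle)))

# usage: render each eye nx+delta pixels wide, then trim delta pixels from
# the outer edge of each image so the zero-parallax planes line up
left_c, right_c = make_stereo_centers([0, 0, 0], [0, 0, 1], [0, 1, 0], 1.0)
delta = frustum_pixel_offset(512, 0.05)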



https://bitbucket.org/yt_analysis/yt/changeset/0e9dfbd9ee0c/
changeset:   0e9dfbd9ee0c
branch:      yt
user:        Christopher Moody
date:        2012-08-16 20:30:25
summary:     Fixed OVERLAP_LENGTH error
affected #:  1 file

diff -r 8ffecc43df6a0c74a25fb9fcf812282a104b4eeb -r 0e9dfbd9ee0c5f740df5af16851c1dc441fe8774 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -237,32 +237,37 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    print 'reading from particle filename %s'%filename # should print ./inline.0
+    global SCALE_NOW, TOTAL_PARTICLES
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     block = int(str(filename).rsplit(".")[-1])
+    
 
     # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-    dd = rh.pf.h.all_data()
+    dd = pf.h.all_data()
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
     tnpart = 0
     for g in grids:
-        tnpart += dd._get_data_from_grid(g, "particle_index").size
+        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
     p[0] = <particle *> malloc(sizeof(particle) * tnpart)
     #print "Loading indices: size = ", tnpart
-    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = pf.domain_left_edge[0]
+    left_edge[1] = pf.domain_left_edge[1]
+    left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
+        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
         arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
             p[0][i+pi].id = arri[i]
@@ -272,38 +277,50 @@
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
     num_p[0] = tnpart
+    TOTAL_PARTICLES = tnpart
+    print_rockstar_settings()
+    print 1.0/(1.0+pf.current_redshift)
     print "Block #%i | Particles %i | Grids %i"%\
             ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
-    cdef public object pf
     cdef public object data_source
+    cdef public object ts
+    cdef public object tsl
     cdef int rank
     cdef int size
     cdef public int block_ratio
+    cdef public int dm_type
+    cdef public int total_particles
 
-    def __cinit__(self, pf, data_source):
-        self.pf = pf
+    def __cinit__(self, ts, data_source):
+        self.ts = ts
+        self.tsl = ts.__iter__() #time series iterator consumed by rh_read_particles
         self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
+                       int num_snaps, np.int64_t total_particles,
+                       int dm_type,
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, int num_snaps = 1,
+                       int periodic = 1, 
                        int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH
+        OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -318,17 +335,19 @@
         FILE_FORMAT = "GENERIC"
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
+        print 'NUM_SNAPS=%i'%num_snaps
         NUM_READERS = num_readers
-        NUM_SNAPS = 1
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
+        TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
-
-        h0 = self.pf.hubble_constant
-        Ol = self.pf.omega_lambda
-        Om = self.pf.omega_matter
-        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        
+        tpf = self.ts[0]
+        h0 = tpf.hubble_constant
+        Ol = tpf.omega_lambda
+        Om = tpf.omega_matter
+        SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
@@ -337,11 +356,11 @@
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
-            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
+            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
-        BOX_SIZE = (self.pf.domain_right_edge[0] -
-                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        BOX_SIZE = (tpf.domain_right_edge[0] -
+                    tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
         cdef LPG func = rh_read_particles
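
The reworked reader above pulls the next snapshot from the time series, keeps
only particles whose particle_type matches the requested dark-matter type, and
refreshes SCALE_NOW from the snapshot's redshift. A small numpy sketch of that
bookkeeping; scale_factor and select_dm_particles are hypothetical helpers,
not yt or Rockstar functions:

import numpy as np

def scale_factor(redshift):
    """Cosmological scale factor a = 1 / (1 + z), as stored in SCALE_NOW."""
    return 1.0 / (1.0 + redshift)

def select_dm_particles(particle_type, fields, dm_type=1):
    """Keep only the dark-matter entries of each per-particle array."""
    iddm = particle_type == dm_type
    return dict((name, arr[iddm]) for name, arr in fields.items())

# usage with toy data: species 1 stands in for dark matter, 4 for stars
ptype = np.array([1, 1, 4, 1, 4])
fields = {"particle_index": np.arange(5),
          "particle_position_x": np.linspace(0.0, 1.0, 5)}
dm_only = select_dm_particles(ptype, fields)
print(scale_factor(0.0))                # 1.0
print(dm_only["particle_index"])        # [0 1 3]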



https://bitbucket.org/yt_analysis/yt/changeset/0c9a443b7e4f/
changeset:   0c9a443b7e4f
branch:      yt
user:        Christopher Moody
date:        2012-08-16 20:32:00
summary:     Updates in the ART frontend
affected #:  4 files

diff -r 0e9dfbd9ee0c5f740df5af16851c1dc441fe8774 -r 0c9a443b7e4fcc18eae2092c6f8f2b55299c1885 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Erick Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -23,6 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+
 import numpy as na
 import stat
 import weakref
@@ -42,24 +45,24 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+import yt.frontends.ramses._ramses_reader as _ramses_reader #do not fail silently;
 
 from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, sec_per_Gyr
-
+    mass_hydrogen_cgs
+    
 from yt.frontends.art.definitions import art_particle_field_names
 
 from yt.frontends.art.io import _read_child_mask_level
 from yt.frontends.art.io import read_particles
 from yt.frontends.art.io import read_stars
+from yt.frontends.art.io import spread_ages
 from yt.frontends.art.io import _count_art_octs
 from yt.frontends.art.io import _read_art_level_info
 from yt.frontends.art.io import _read_art_child
@@ -81,19 +84,43 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,np=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=np
+        self.particle_type = na.array([])
+        self.particle_id= na.array([])
+        self.particle_age= na.array([])
+        self.particle_position_x = na.array([])
+        self.particle_position_y = na.array([])
+        self.particle_position_z = na.array([])
+        self.particle_velocity_x = na.array([])
+        self.particle_velocity_y = na.array([])
+        self.particle_velocity_z = na.array([])
+        self.particle_mass= na.array([])
+        self.star_position_x = na.array([])
+        self.star_position_y = na.array([])
+        self.star_position_z = na.array([])
+        self.star_velocity_x = na.array([])
+        self.star_velocity_y = na.array([])
+        self.star_velocity_z = na.array([])
+        self.star_age = na.array([])
+        self.star_metallicity1 = na.array([])
+        self.star_metallicity2 = na.array([])
+        self.star_mass_initial = na.array([])
+        self.star_mass = na.array([])
+         
         #if child_mask is not None:
         #    self._set_child_mask(child_mask)
 
@@ -109,7 +136,8 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -138,13 +166,105 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
+        if 'particle_position' in dir(self.pf):
+            self._setup_particle_grids()
         self._setup_field_list()
         
+    def _setup_particle_grids(self):
+        grid_particle_count = na.zeros(len(self.grids),dtype='int64')
+        npt = self.pf.particle_position.shape[0]
+        if self.pf.do_grid_particles:
+            nps = self.pf.star_position.shape[0]
+            grid_indices = na.zeros(nps,dtype='int64')
+            particle_id= na.arange(nps,dtype='int64')
+            pbar = get_pbar("Gridding Particles",len(self.grids))
+            grid_indices,grid_particle_count,grids_done = \
+                    particle_assignment(self.grids,
+                      self.grids[0], 
+                      self.pf.star_position,
+                      particle_id,
+                      grid_indices,
+                      grid_particle_count, 
+                      self.pf.domain_dimensions,
+                      self.pf.max_level,
+                      logger=pbar)
+            pbar.finish()        
+            pbar = get_pbar("Finalizing grids ",len(self.grids))
+            for gi, (g,npi) in enumerate(zip(self.grids,grid_particle_count)): 
+                star_mask= grid_indices==gi
+                if gi==0:
+                    #attach all the particles to the root grid
+                    g.particle_type = self.pf.particle_type
+                    g.particle_id = na.arange(npt)
+                    g.particle_mass = self.pf.particle_mass
+                    g.particle_mass_initial = self.pf.particle_mass_initial
+                    g.particle_age = self.pf.particle_age
+                    g.particle_metallicity= self.pf.particle_metallicity
+                    g.particle_position_x= self.pf.particle_position[:,0]
+                    g.particle_position_y= self.pf.particle_position[:,1]
+                    g.particle_position_z= self.pf.particle_position[:,2]
+                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
+                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
+                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
+                if star_mask.sum()>0:
+                    star_data = self.pf.star_data[star_mask]         
+                    (g.star_position_x, \
+                        g.star_position_y, \
+                        g.star_position_z, \
+                        g.star_velocity_x,\
+                        g.star_velocity_y,\
+                        g.star_velocity_z,\
+                        g.star_age,\
+                        g.star_metallicity1,\
+                        g.star_metallicity2,\
+                        g.star_mass_initial,\
+                        g.star_mass) = tuple(star_data.T)
+                    g.NumberOfParticles = npi        
+                self.grids[gi] = g
+                pbar.update(gi)
+            pbar.finish()
+        else:        
+            pbar = get_pbar("Finalizing grids ",len(self.grids))
+            for gi, g in enumerate(self.grids): 
+                if gi==0:
+                    #attach all the particles to the root grid
+                    g.particle_type = self.pf.particle_type
+                    g.particle_id = na.arange(npt)
+                    g.particle_mass = self.pf.particle_mass
+                    g.particle_mass_initial = self.pf.particle_mass_initial
+                    g.particle_age = self.pf.particle_age
+                    g.particle_metallicity= self.pf.particle_metallicity
+                    g.particle_position_x= self.pf.particle_position[:,0]
+                    g.particle_position_y= self.pf.particle_position[:,1]
+                    g.particle_position_z= self.pf.particle_position[:,2]
+                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
+                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
+                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
+                    if self.pf.do_stars:
+                        (g.star_position_x, \
+                            g.star_position_y, \
+                            g.star_position_z, \
+                            g.star_velocity_x,\
+                            g.star_velocity_y,\
+                            g.star_velocity_z,\
+                            g.star_age,\
+                            g.star_metallicity1,\
+                            g.star_metallicity2,\
+                            g.star_mass_initial,\
+                            g.star_mass) = tuple(self.pf.star_data.T)
+                    g.NumberOfParticles = npt        
+                else:
+                    g.star_indices = []
+                self.grids[gi] = g
+            pbar.finish()
+            grid_particle_count[0]=npt
+        self.grid_particle_count = grid_particle_count
+
     def _initialize_data_storage(self):
         pass
 
@@ -209,11 +329,18 @@
             if level > self.pf.limit_level : continue
             
             #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                    self.pf.level_oct_offsets,level,
+                    coarse_grid=self.pf.domain_dimensions[0])
+            if level>1:
+                assert root_level == last_root_level
+            last_root_level = root_level
+                    
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
             #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
+            idc, art_child_mask = _read_child_mask_level(f, 
+                    self.pf.level_child_offsets,
                 level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
             art_child_mask = art_child_mask.reshape((nocts,2,2,2))
             self.pf.level_art_child_masks[level]=art_child_mask
@@ -307,7 +434,7 @@
                         eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
             
         
-            mylog.debug("Done with level % 2i", level)
+            mylog.info("Done with level % 2i; max LE %i", level,na.max(left_index))
             pbar.finish()
             self.proto_grids.append(psgs)
             #print sum(len(psg.grid_file_locations) for psg in psgs)
@@ -322,35 +449,33 @@
 
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = na.zeros(3,dtype='float64')
+                    re = na.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert na.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = na.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
         
@@ -359,7 +484,7 @@
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
+            nstars = na.diff(lspecies)[-1]
             a = self.pf.parameters['aexpn']
             hubble = self.pf.parameters['hubble']
             ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
@@ -368,7 +493,7 @@
             um *= 1.989e33 #convert solar masses to grams 
             pbar = get_pbar("Loading Particles   ",5)
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
+                read_particles(self.pf.file_particle_data,Nrow)
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
@@ -379,11 +504,16 @@
                     npa = clspecies[self.pf.only_particle_type]
                     npb = clspecies[self.pf.only_particle_type+1]
             np = npb-npa
+            nparticles = np
+            #make sure we aren't going to throw out good particles
+            if not na.all(self.pf.particle_position[npb:]==0.0):
+                print 'WARNING: unused particles discovered from lspecies'
             self.pf.particle_position   = self.pf.particle_position[npa:npb]
             #do NOT correct by an offset of 1.0
             #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            #to unitary units (comoving)
             pbar.update(3)
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
@@ -392,13 +522,15 @@
             self.pf.particle_mass         = na.zeros(np,dtype='float64')
             self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
             self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity  = na.zeros(np,dtype='float64')-1
             self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
             self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
             self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
@@ -411,10 +543,12 @@
             self.pf.conversion_factors['particle_index']=1.0
             self.pf.conversion_factors['particle_type']=1
             self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            self.pf.conversion_factors['Msun'] = 5.027e-34 
+            #conversion to solar mass units
             
 
             a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 if type(self.pf.only_particle_type)==type(5):
                     if not i==self.pf.only_particle_type:
@@ -425,79 +559,82 @@
                 else:
                     self.pf.particle_type[a:b] = i #particle type
                     self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                    if m==0.0:
+                        self.pf.particle_star_index = i
                 a=b
             pbar.finish()
 
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
+            lparticles = [0,]+list(lspecies)
+            for j,np in enumerate(lparticles):
                 mylog.debug('found %i of particle type %i'%(j,np))
             
-            self.pf.particle_star_index = i
             
             do_stars = (self.pf.only_particle_type is None) or \
                        (self.pf.only_particle_type == -1) or \
                        (self.pf.only_particle_type == len(lspecies))
+            self.pf.do_stars = do_stars           
             if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+                nstars_pa = nstars
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_star_data)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = nstars_pa
+                if not nstars_rs==na.sum(self.pf.particle_type==self.pf.particle_star_index):
+                    print 'WARNING!: nstars is inconsistent!'
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
+                    birthtimes= \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread
+                    if spread == False:
+                        pass
+                    elif type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    else:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index    
+                    assert na.sum(idx)==nstars_pa
+                    self.pf.star_position = self.pf.particle_position[idx]
+                    self.pf.star_velocity = self.pf.particle_velocity[idx]
+                    self.pf.particle_age[idx] = ages
+                    self.pf.star_age = ages
                     pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
+                    self.pf.particle_metallicity[idx] = metallicity1+metallicity2
+                    self.pf.particle_metallicity1[idx] = metallicity1
+                    self.pf.particle_metallicity2[idx] = metallicity2
+                    self.pf.particle_mass[idx] = mass*um
+                    self.pf.particle_mass_initial[idx] = mass*um
+                    self.pf.star_metallicity1 = metallicity1
+                    self.pf.star_metallicity2 = metallicity2
+                    self.pf.star_mass_initial = imass*um
+                    self.pf.star_mass = mass*um
+                    self.pf.star_data = na.array([
+                        self.pf.star_position[:,0],
+                        self.pf.star_position[:,1],
+                        self.pf.star_position[:,2],
+                        self.pf.star_velocity[:,0],
+                        self.pf.star_velocity[:,1],
+                        self.pf.star_velocity[:,2],
+                        self.pf.star_age,
+                        self.pf.star_metallicity1,
+                        self.pf.star_metallicity2,
+                        self.pf.star_mass_initial,
+                        self.pf.star_mass]).T
 
             done = 0
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
 
     def _get_grid_parents(self, grid, LE, RE):
         mask = na.zeros(self.num_grids, dtype='bool')
@@ -507,47 +644,35 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = na.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
-        self.max_level = self.grid_levels.max()
-
-    # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
+        pb.finish()
+        #self.max_level = self.grid_levels.max()
 
     def _setup_field_list(self):
         if self.parameter_file.use_particles:
@@ -588,10 +713,10 @@
                  file_particle_data=None,
                  file_star_data=None,
                  discover_particles=True,
-                 use_particles=True,
                  limit_level=None,
                  only_particle_type = None,
-                 grid_particles=False,
+                 do_grid_particles=False,
+                 spread = True,
                  single_particle_mass=False,
                  single_particle_type=0):
         
@@ -605,8 +730,9 @@
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
         self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
+        self.do_grid_particles = do_grid_particles
         self.single_particle_mass = single_particle_mass
+        self.spread = spread
         
         if limit_level is None:
             self.limit_level = na.inf
@@ -719,8 +845,11 @@
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
+
 
         #we were already in seconds, go back in to code units
         #self.current_time /= self.t0 
@@ -817,7 +946,7 @@
         # integrand_arr = integrand(spacings)
         # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
         # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
@@ -865,7 +994,8 @@
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
         root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctChfull = _read_frecord(f,'>i')
+        self.root_iOctCh = self.root_iOctChfull[:root_cells]
         self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
@@ -927,10 +1057,12 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        self.parameters['wspeciesf'] = na.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspeciesf'] = na.fromfile(fh,dtype='>i',count=10)
+        assert na.all(self.parameters['lspeciesf'][n:]==0.0)
+        assert na.all(self.parameters['wspeciesf'][n:]==0.0)
+        self.parameters['wspecies'] = self.parameters['wspeciesf'][:n]
+        self.parameters['lspecies'] = self.parameters['lspeciesf'][:n]
         fh.close()
         
         ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
@@ -940,14 +1072,64 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        """
-        Defined for Daniel Ceverino's file naming scheme.
-        This could differ for other formats.
-        """
-        fn = ("%s" % (os.path.basename(args[0])))
-        f = ("%s" % args[0])
-        if fn.endswith(".d") and fn.startswith('10Mpc') and\
-                os.path.exists(f): 
-                return True
-        return False
+        if "10MpcBox" in args[0]:
+            return True
+        return os.path.exists("%s.hierarchy" % args[0])
 
+def particle_assignment(grids,this_grid, 
+                                  pos,
+                                  particle_id,
+                                  grid_indices,
+                                  grid_particle_count, 
+                                  domain_dimensions,
+                                  max_level,
+                                  subdiv=2,
+                                  grids_done=0,
+                                  logger=None):
+    #for every particle check every child grid to see if it fits inside
+    #cast the pos -> cell location index (instead of doing a LE<pos<RE check)
+    #find if cell descends into the next mesh
+    
+    #cast every position into a cell on this grid
+    #we may get negative indices or indices outside this grid
+    #mask them out
+    exp = domain_dimensions*subdiv**this_grid.Level
+    lei= na.floor((pos-this_grid.LeftEdge)*exp).astype('int64')
+
+    #now lookup these indices in the child index mask
+    #throw out child grids = -1 and particles outside the range
+    #default state is to not grid a particle
+    child_idx = na.zeros(lei.shape[0],dtype='int64')-1
+    #remove particles to the left or right of the grid
+    lei_out  = na.any(lei>=this_grid.ActiveDimensions,axis=1)
+    lei_out |= na.any(lei<0,axis=1)
+    #lookup grids for every particle except the ones to the 
+    leio=lei[~lei_out]
+    #child_idx[~lei_out]= \
+    child_idx[~lei_out]= \
+            this_grid.child_index_mask[(leio[:,0],leio[:,1],leio[:,2])]
+    mask = (child_idx > -1)
+    #only assign the particles if they point to a grid ID that isn't -1
+    grid_indices[particle_id[mask]] = child_idx[mask]
+    #the number of particles on this grid is equal to those
+    #that point to -1
+    grid_particle_count[this_grid.id] = na.sum(~mask)
+    grids_done +=1
+    if logger:
+        logger.update(grids_done)
+
+    for child_grid_index in na.unique(this_grid.child_index_mask):
+        if child_grid_index == -1: 
+            continue
+        if grids[child_grid_index].Level == max_level:
+            continue
+        mask = child_idx == child_grid_index
+        if na.sum(mask)==0:continue
+        grid_indices,grid_particle_count,grids_done = \
+        particle_assignment(grids,grids[child_grid_index],
+                pos[mask],particle_id[mask],
+                grid_indices,grid_particle_count,
+                domain_dimensions,max_level,grids_done=grids_done,
+                subdiv=subdiv,logger=logger)
+    return grid_indices,grid_particle_count,grids_done
+
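
The new particle_assignment above walks the grid tree: on each grid it casts
particle positions into integer cell indices at that grid's refinement level,
looks the indices up in child_index_mask (-1 meaning no finer grid covers the
cell), keeps the -1 particles on the current grid, and recurses into each
child with the particles that landed in it. A single-grid, non-recursive
sketch of that position-to-cell lookup; locate_children is illustrative and
not part of yt:

import numpy as np

def locate_children(pos, left_edge, active_dims, level,
                    child_index_mask, domain_dimensions, subdiv=2):
    """For each particle, return the id of the child grid covering it,
    or -1 if it stays on this grid (or falls outside it)."""
    cells_per_unit = domain_dimensions * subdiv ** level
    # integer cell index of each particle on this grid
    lei = np.floor((pos - left_edge) * cells_per_unit).astype("int64")
    child = np.full(pos.shape[0], -1, dtype="int64")
    inside = np.all((lei >= 0) & (lei < active_dims), axis=1)
    li = lei[inside]
    child[inside] = child_index_mask[li[:, 0], li[:, 1], li[:, 2]]
    return child

# usage: a 4^3 root grid whose lower octant is refined into child grid 0
dd = np.array([4, 4, 4])
mask = -np.ones((4, 4, 4), dtype="int64")
mask[:2, :2, :2] = 0
pos = np.array([[0.1, 0.1, 0.1],   # lands in the refined octant
                [0.9, 0.9, 0.9]])  # stays on the root grid
print(locate_children(pos, np.zeros(3), dd, 0, mask, dd))   # [ 0 -1]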


diff -r 0e9dfbd9ee0c5f740df5af16851c1dc441fe8774 -r 0c9a443b7e4fcc18eae2092c6f8f2b55299c1885 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,13 +26,10 @@
 """
 
 art_particle_field_names = [
-'particle_age',
 'particle_index',
 'particle_mass',
 'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
+'particle_age',
 'particle_metallicity',
 'particle_position_x',
 'particle_position_y',
@@ -40,4 +37,18 @@
 'particle_velocity_x',
 'particle_velocity_y',
 'particle_velocity_z',
-'particle_type']
+'particle_type',
+'star_position_x',
+'star_position_y',
+'star_position_z',
+'star_velocity_x',
+'star_velocity_y',
+'star_velocity_z',
+'star_age',
+'star_mass',
+'star_mass_initial',
+'star_creation_time',
+'star_metallicity1',
+'star_metallicity2',
+'star_metallicity',
+]


diff -r 0e9dfbd9ee0c5f740df5af16851c1dc441fe8774 -r 0c9a443b7e4fcc18eae2092c6f8f2b55299c1885 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[na.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert na.all(na.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -242,7 +235,7 @@
 
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
-    tr += data["MetalDensitySNII"]
+    tr = data["MetalDensitySNII"]
     return tr
 add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Metal_Density"]._units = r""
@@ -254,17 +247,44 @@
 #Derived particle fields
 
 def mass_dm(field, data):
+    tr = na.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= na.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= na.sum(data['particle_mass'][idx])*data.pf['Msun'] #multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = na.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           na.array(data.LeftEdge).astype(na.float64),
+                           na.array(data.ActiveDimensions).astype(na.int32), 
+                           na.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = na.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
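
The new _spdensity above spreads star masses over grid cells with yt's
CICDeposit_3 (its add_field registration is left commented out in favor of
the simpler _simple_density). The stand-in below does a nearest-grid-point
deposit instead of cloud-in-cell, purely to illustrate the bookkeeping; it is
not the yt implementation and ngp_deposit is a made-up name:

import numpy as np

def ngp_deposit(pos, mass, left_edge, dims, dx):
    """Deposit each particle's mass into the single cell containing it."""
    grid_mass = np.zeros(dims, dtype="float64")
    idx = np.floor((pos - left_edge) / dx).astype("int64")
    # clip particles sitting exactly on the right edge into the last cell
    idx = np.clip(idx, 0, np.asarray(dims) - 1)
    for (i, j, k), m in zip(idx, mass):
        grid_mass[i, j, k] += m
    return grid_mass

# usage: two stars in a 2x2x2 grid of unit cells
pos = np.array([[0.25, 0.25, 0.25], [1.75, 0.25, 0.25]])
mass = np.array([1.0, 2.0])
print(ngp_deposit(pos, mass, np.zeros(3), (2, 2, 2), 1.0)[:, 0, 0])  # [1. 2.]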


diff -r 0e9dfbd9ee0c5f740df5af16851c1dc441fe8774 -r 0c9a443b7e4fcc18eae2092c6f8f2b55299c1885 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,38 +122,60 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        import pdb; pdb.set_trace()
         if field == 'particle_index':
-            return na.array(idx)
+            return grid.particle_id
         if field == 'particle_type':
-            return grid.pf.particle_type[idx]
+            return grid.particle_type
         if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
+            return grid.particle_position_x
         if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
+            return grid.particle_position_y
         if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
+            return grid.particle_position_z
+        if field == 'particle_age':
+            return grid.particle_age
         if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
+            return grid.particle_mass
+        if field == 'particle_mass_initial':
+            return grid.particle_mass_initial
+        if field == 'particle_metallicity':
+            return grid.particle_metallicity
         if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
+            return grid.particle_velocity_x
         if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
+            return grid.particle_velocity_y
         if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
+            return grid.particle_velocity_z
         
         #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
+        if field == 'star_position_x':
+            return grid.star_position_x
+        if field == 'star_position_y':
+            return grid.star_position_y
+        if field == 'star_position_z':
+            return grid.star_position_z
+        if field == 'star_mass':
+            return grid.star_mass
+        if field == 'star_velocity_x':
+            return grid.star_velocity_x
+        if field == 'star_velocity_y':
+            return grid.star_velocity_y
+        if field == 'star_velocity_z':
+            return grid.star_velocity_z
+        if field == 'star_age':
+            return grid.star_age
+        if field == 'star_metallicity':
+            return grid.star_metallicity1 +\
+                   grid.star_metallicity2
+        if field == 'star_metallicity1':
+            return grid.star_metallicity1
+        if field == 'star_metallicity2':
+            return grid.star_metallicity2
+        if field == 'star_mass_initial':
+            return grid.star_mass_initial
+        if field == 'star_mass':
+            return grid.star_mass
         
         raise 'Should have matched one of the particle fields...'
 
@@ -198,9 +220,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +254,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +305,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=na.floor(na.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +326,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +340,7 @@
     data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +353,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -476,3 +503,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= na.diff(ages)
+    assert na.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = na.zeros(ages.shape)
+    idx, = na.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=na.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=na.linspace(lage,rage,n)
+    return agesd
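
spread_ages replaces each lump of identical formation times with a linear
ramp that ends at the lump's age and starts no more than `spread` (about
1e7 yr, expressed in seconds) earlier. A minimal sketch of the ramp
written into agesd for a single lump, using the same expressions as the
loop body above (illustration only):

    import numpy as np

    spread = 1.0e7 * 365 * 24 * 3600   # the default spread, in seconds
    rage, n = 4.0e15, 5                # one lump: five stars with the same age
    lage = max(rage - spread, 0.0)
    print(np.linspace(lage, rage, n))  # the values assigned to agesd[lidx:i]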



https://bitbucket.org/yt_analysis/yt/changeset/066808c66bc2/
changeset:   066808c66bc2
branch:      yt
user:        Christopher Moody
date:        2012-08-16 23:50:17
summary:     Fixed bug where all data files were marked as ART
affected #:  1 file

diff -r 0c9a443b7e4fcc18eae2092c6f8f2b55299c1885 -r 066808c66bc2eb75b806ff5447863fd8593f90dc yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -1074,7 +1074,7 @@
     def _is_valid(self, *args, **kwargs):
         if "10MpcBox" in args[0]:
             return True
-        return os.path.exists("%s.hierarchy" % args[0])
+        return False
 
 def particle_assignment(grids,this_grid, 
                                   pos,



https://bitbucket.org/yt_analysis/yt/changeset/39375b0ce832/
changeset:   39375b0ce832
branch:      yt
user:        Christopher Moody
date:        2012-08-16 23:52:02
summary:     Updating the Rockstar python manager as well
affected #:  1 file

diff -r 066808c66bc2eb75b806ff5447863fd8593f90dc -r 39375b0ce832a49e46e10d74101d8e0f53812046 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -24,8 +24,6 @@
 """
 
 from yt.mods import *
-from os import environ
-from os import mkdir
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
@@ -47,36 +45,64 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,overwrite=False,
-            left_edge = None, right_edge = None):
+    def __init__(self, ts, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,dm_type=1):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
-        self.pf = pf
-        self.hierarchy = pf.h
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshot
+        if not isinstance(ts,TimeSeriesData):
+            ts = TimeSeriesData([ts])
+        self.ts = ts
+        self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
+        tpf = ts.__iter__().next()
+        dd = tpf.h.all_data()
+        print 'total particles: ',
+        total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
+        print total_particles
+        self.total_particles = -1
+        self.hierarchy = tpf.h
+        self.particle_mass = particle_mass 
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
+        if outbase is None:
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
         if num_writers is None:
             num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
-        self.particle_mass = particle_mass 
-        self.overwrite = overwrite
-        if left_edge is None:
-            left_edge = pf.domain_left_edge
-        if right_edge is None:
-            right_edge = pf.domain_right_edge
-        self.le = left_edge
-        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
             print '%i reader + %i writers != %i mpi'%\
                     (self.num_readers, self.num_writers, self.comm.size)
             raise RuntimeError
+<<<<<<< local
+        if self.comm.size > 1:
+            print 'creating MPI workgroups'
+            self.pool = ProcessorPool()
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(num_readers, name = "readers")
+            self.pool.add_workgroup(num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
+=======
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = self.pf.h.all_data()
+>>>>>>> other
         self.handler = rockstar_interface.RockstarInterface(
+<<<<<<< local
+                self.ts, data_source)
+
+    def __del__(self):
+        self.pool.free_all()
+=======
                 self.pf, data_source)
         if outbase is None:
             outbase = str(self.pf)+'_rockstar'
         self.outbase = outbase        
+>>>>>>> other
 
     def _get_hosts(self):
         if self.comm.size == 1 or self.workgroup.name == "server":
@@ -92,6 +118,8 @@
         self.port = str(self.port)
 
     def run(self, block_ratio = 1,**kwargs):
+<<<<<<< local
+=======
         """
         
         """
@@ -104,17 +132,12 @@
             self.pool.add_workgroup(self.num_writers, name = "writers")
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
+>>>>>>> other
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
-        #because rockstar *always* write to exactly the same
-        #out_0.list filename we make a directory for it
-        #to sit inside so it doesn't get accidentally
-        #overwritten 
-        if self.workgroup.name == "server":
-            if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
+                    len(self.ts), self.total_particles, self.dm_type,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
@@ -123,6 +146,13 @@
                     outbase = self.outbase,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
+        #because rockstar *always* write to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -137,11 +167,12 @@
                 self.handler.start_client()
             self.pool.free_all()
         self.comm.barrier()
-        #quickly rename the out_0.list 
+        self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
         """
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+        tpf = self.ts[0]
+        return RockstarHaloList(tpf,file_name)
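
With this change the finder is driven by a TimeSeriesData rather than a
single parameter file, and halo_list() now reads out_0.list for the first
snapshot in the series. A minimal usage sketch (the glob pattern and the
particle mass are placeholder values; run under MPI with num_readers +
num_writers + 1 server ranks):

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

    ts = TimeSeriesData.from_filenames("outputs/snapshot_*")  # placeholder pattern
    rh = RockstarHaloFinder(ts, particle_mass=1.0e9)          # placeholder mass
    rh.run()
    halos = rh.halo_list(file_name='out_0.list')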



https://bitbucket.org/yt_analysis/yt/changeset/f99c10f24f2f/
changeset:   f99c10f24f2f
branch:      yt
user:        Christopher Moody
date:        2012-08-17 01:02:49
summary:     Fixed indentation
affected #:  1 file

diff -r 39375b0ce832a49e46e10d74101d8e0f53812046 -r f99c10f24f2f1f2a00245b7874c0dda037983a00 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -964,16 +964,16 @@
         delta = delta.astype('int')
         eresolution = resolution[0]+delta
         left_camera = Camera(le_c, le_norm, oc.width,
-                             eresolution, oc.transfer_function, north_vector=up,
-                             volume=oc.volume, fields=oc.fields, 
-                             log_fields=oc.log_fields,
-                             sub_samples=oc.sub_samples, pf=oc.pf)
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
         left_camera.trim = [-delta,0]
         right_camera = Camera(re_c, re_norm, oc.width,
-                             eresolution, oc.transfer_function, north_vector=up,
-                             volume=oc.volume, fields=oc.fields, 
-                             log_fields=oc.log_fields,
-                             sub_samples=oc.sub_samples, pf=oc.pf)
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
         right_camera.trim = [0,-delta]
         return (left_camera, right_camera)
 



https://bitbucket.org/yt_analysis/yt/changeset/0ad23331bd22/
changeset:   0ad23331bd22
branch:      yt
user:        Christopher Moody
date:        2012-08-17 20:30:37
summary:     return how many stars the sunrise exporter is using
affected #:  1 file

diff -r f99c10f24f2f1f2a00245b7874c0dda037983a00 -r 0ad23331bd22b6dc18e1c81aa01d06e07f30b397 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -41,8 +41,6 @@
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -95,7 +93,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +107,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,7 +191,7 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
     add_fields() #add the metal mass field that sunrise wants
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
@@ -278,6 +276,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -298,7 +297,8 @@
     txt  = '%1i '
     txt += '%1.3f '*3+'- '
     txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    if l<2:
+        print txt%((l,)+tuple(fle)+tuple(fre))
 
 def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
                             pos, #the output hydro data position and refinement position
@@ -316,16 +316,18 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
     levels[pos.output_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +340,18 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                child_ile = cell_index + na.array(vertex)*2**(-level)
             else:
                 child_ile = subgrid_ile+na.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,level+1,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -495,11 +497,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert na.sum(idxst)>0
     if pos is None:
         pos = na.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idxst & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    assert na.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -546,7 +552,10 @@
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,na.sum(idx)
 
 
 def add_fields():
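
The recursion above writes a depth-first boolean stream into `refined`:
True means the cell is an oct whose eight children follow, False means a
leaf whose hydro values were appended to `output`. Ignoring the negative
"superlevel" case, a minimal sketch of how such a stream can be walked
back (illustration only, not part of the exporter):

    def count_leaves(refined, i=0):
        """Return (number of leaves, index just past this subtree)."""
        if not refined[i]:             # False: a leaf cell
            return 1, i + 1
        n, j = 0, i + 1                # True: an oct with eight children
        for _ in range(8):
            c, j = count_leaves(refined, j)
            n += c
        return n, j

    # one oct whose eight children are all leaves:
    print(count_leaves([True] + [False] * 8))   # -> (8, 9)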



https://bitbucket.org/yt_analysis/yt/changeset/dbe1c5e556ec/
changeset:   dbe1c5e556ec
branch:      yt
user:        Christopher Moody
date:        2012-08-17 20:33:07
summary:     removed pdb
affected #:  1 file

diff -r 0ad23331bd22b6dc18e1c81aa01d06e07f30b397 -r dbe1c5e556ec116f755f59e60a51495d6bb2e5bc yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,7 +122,6 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        import pdb; pdb.set_trace()
         if field == 'particle_index':
             return grid.particle_id
         if field == 'particle_type':



https://bitbucket.org/yt_analysis/yt/changeset/a0e342f55b33/
changeset:   a0e342f55b33
branch:      yt
user:        Christopher Moody
date:        2012-08-17 21:20:17
summary:     fixing conflict in rockstar.py
affected #:  1 file

diff -r dbe1c5e556ec116f755f59e60a51495d6bb2e5bc -r a0e342f55b334f61c9267eb952b6d3db5a4b3a32 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -59,9 +59,7 @@
             self.comm.barrier()            
         tpf = ts.__iter__().next()
         dd = tpf.h.all_data()
-        print 'total particles: ',
         total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
-        print total_particles
         self.total_particles = -1
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
@@ -75,10 +73,8 @@
         self.num_readers = num_readers
         self.num_writers = num_writers
         if self.num_readers + self.num_writers + 1 != self.comm.size:
-            print '%i reader + %i writers != %i mpi'%\
-                    (self.num_readers, self.num_writers, self.comm.size)
+            #we need readers+writers+1 server = comm size        
             raise RuntimeError
-<<<<<<< local
         if self.comm.size > 1:
             print 'creating MPI workgroups'
             self.pool = ProcessorPool()
@@ -87,22 +83,13 @@
             self.pool.add_workgroup(num_writers, name = "writers")
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
-=======
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = self.pf.h.all_data()
->>>>>>> other
         self.handler = rockstar_interface.RockstarInterface(
-<<<<<<< local
                 self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
-=======
-                self.pf, data_source)
-        if outbase is None:
-            outbase = str(self.pf)+'_rockstar'
-        self.outbase = outbase        
->>>>>>> other
 
     def _get_hosts(self):
         if self.comm.size == 1 or self.workgroup.name == "server":
@@ -118,8 +105,6 @@
         self.port = str(self.port)
 
     def run(self, block_ratio = 1,**kwargs):
-<<<<<<< local
-=======
         """
         
         """



https://bitbucket.org/yt_analysis/yt/changeset/9b6303c19f1c/
changeset:   9b6303c19f1c
branch:      yt
user:        Christopher Moody
date:        2012-08-18 00:45:25
summary:     Making the changes Matt has recommended including removing print funcs, and excessive explicit fields in ART IO
affected #:  2 files

diff -r a0e342f55b334f61c9267eb952b6d3db5a4b3a32 -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -47,6 +47,61 @@
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
             outbase=None,particle_mass=-1.0,dm_type=1):
+        r"""Spawns the Rockstar Halo finder, distributes dark matter
+        particles and finds halos.
+
+        The halo finder requires dark matter particles of a single, fixed mass.
+        Rockstar has three main processes: reader, writer, and the 
+        server which coordinates reader/writer processes.
+
+        Parameters
+        ----------
+        ts   : TimeSeriesData, StaticOutput
+            This is the data source containing the DM particles. Because 
+            halo IDs may change from one snapshot to the next, the only
+            way to keep a consistent halo ID across time is to feed 
+            Rockstar a set of snapshots, i.e., via TimeSeriesData.
+        num_readers: int
+            The number of readers can be increased from the default
+            of 1 when a single snapshot is split among many files;
+            this can help when performance is IO-limited.
+        num_writers: int
+            The number of writers determines the number of processing threads
+            as well as the number of threads writing output data.
+            The default is comm.size - num_readers - 1.
+        outbase: str
+            This is where the out*list files that Rockstar makes should be
+            placed. Default is str(pf)+'_rockstar'.
+        particle_mass: float
+            This sets the DM particle mass used in Rockstar.
+        dm_type: int
+            Set this to select the dark matter particles and exclude
+            stars and other particle types. Default is 1, as Enzo uses
+            particle type 1 for dark matter.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        To use the script below you must run it using MPI:
+        mpirun -np 3 python test_rockstar.py --parallel
+
+        test_rockstar.py:
+
+        from mpi4py import MPI
+        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+        from yt.mods import *
+        import sys
+
+        files = glob.glob('/u/cmoody3/data/a*')
+        files.sort()
+        ts = TimeSeriesData.from_filenames(files)
+        pm = 7.81769027e+11
+        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh.run()
+        """
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         #we assume that all of the snapshots in the time series
@@ -76,7 +131,7 @@
             #we need readers+writers+1 server = comm size        
             raise RuntimeError
         if self.comm.size > 1:
-            print 'creating MPI workgroups'
+            mylog.debug('creating MPI workgroups')
             self.pool = ProcessorPool()
             self.pool.add_workgroup(1, name = "server")
             self.pool.add_workgroup(num_readers, name = "readers")
@@ -117,7 +172,6 @@
             self.pool.add_workgroup(self.num_writers, name = "writers")
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
->>>>>>> other
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()


diff -r a0e342f55b334f61c9267eb952b6d3db5a4b3a32 -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -284,10 +284,6 @@
         pi += npart
     num_p[0] = tnpart
     TOTAL_PARTICLES = tnpart
-    print_rockstar_settings()
-    print 1.0/(1.0+pf.current_redshift)
-    print "Block #%i | Particles %i | Grids %i"%\
-            ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
@@ -335,7 +331,6 @@
         FILE_FORMAT = "GENERIC"
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
-        print 'NUM_SNAPS=%i'%num_snaps
         NUM_READERS = num_readers
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
@@ -351,11 +346,9 @@
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
-            print 'using %s as outbase'%outbase
             OUTBASE = outbase 
 
         if particle_mass < 0:
-            print "Assuming single-mass particle."
             particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic



https://bitbucket.org/yt_analysis/yt/changeset/8dd74a5ab7df/
changeset:   8dd74a5ab7df
branch:      yt
user:        Christopher Moody
date:        2012-09-18 01:55:55
summary:     small fixes to rockstar
affected #:  4 files

diff -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e -r 8dd74a5ab7df83179c8432c064eeda8989830877 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -115,6 +115,7 @@
         tpf = ts.__iter__().next()
         dd = tpf.h.all_data()
         total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
+        mylog.info("Found %i halo particles",total_particles)
         self.total_particles = -1
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
@@ -130,16 +131,9 @@
         if self.num_readers + self.num_writers + 1 != self.comm.size:
             #we need readers+writers+1 server = comm size        
             raise RuntimeError
-        if self.comm.size > 1:
-            mylog.debug('creating MPI workgroups')
-            self.pool = ProcessorPool()
-            self.pool.add_workgroup(1, name = "server")
-            self.pool.add_workgroup(num_readers, name = "readers")
-            self.pool.add_workgroup(num_writers, name = "writers")
-            for wg in self.pool.workgroups:
-                if self.comm.rank in wg.ranks: self.workgroup = wg
-        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        data_source = self.pf.h.all_data()
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
+        self.comm.barrier()
         self.handler = rockstar_interface.RockstarInterface(
                 self.ts, data_source)
 


diff -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e -r 8dd74a5ab7df83179c8432c064eeda8989830877 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -284,6 +284,14 @@
         pi += npart
     num_p[0] = tnpart
     TOTAL_PARTICLES = tnpart
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
 
 cdef class RockstarInterface:
 


diff -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e -r 8dd74a5ab7df83179c8432c064eeda8989830877 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -78,7 +78,7 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs, parallel = True):
+    def __init__(self, outputs, parallel = True ,**kwargs):
         r"""The TimeSeriesData object is a container of multiple datasets,
         allowing easy iteration and computation on them.
 
@@ -107,12 +107,13 @@
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
         self.parallel = parallel
+        self.kwargs = kwargs
 
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, types.StringTypes):
-                yield load(o)
+                yield load(o,**self.kwargs)
             else:
                 yield o
 
@@ -124,7 +125,7 @@
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, types.StringTypes):
-            o = load(o)
+            o = load(o,**self.kwargs)
         return o
 
     def __len__(self):
@@ -223,7 +224,7 @@
         return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filenames, parallel = True):
+    def from_filenames(cls, filenames, parallel = True, **kwargs):
         r"""Create a time series from either a filename pattern or a list of
         filenames.
 
@@ -260,7 +261,7 @@
         if isinstance(filenames, types.StringTypes):
             filenames = glob.glob(filenames)
             filenames.sort()
-        obj = cls(filenames[:], parallel = parallel)
+        obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 
     @classmethod
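
Any extra keyword arguments handed to TimeSeriesData or to from_filenames
are now stored and passed through to load() each time an output is
instantiated, so frontend-specific options apply uniformly across the
series. A minimal sketch (the keyword shown is only a placeholder, not a
real load() option):

    from yt.mods import *   # provides TimeSeriesData

    ts = TimeSeriesData.from_filenames("outputs/snapshot_*",
                                       some_frontend_kwarg=1)  # placeholder kwarg
    for pf in ts:
        # each iteration calls load(filename, some_frontend_kwarg=1)
        print(pf)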


diff -r 9b6303c19f1ca19ea2acc7fa3a003c5234b0ab5e -r 8dd74a5ab7df83179c8432c064eeda8989830877 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -716,6 +716,7 @@
                  limit_level=None,
                  only_particle_type = None,
                  do_grid_particles=False,
+                 merge_dm_and_stars=False,
                  spread = True,
                  single_particle_mass=False,
                  single_particle_type=0):
@@ -732,6 +733,7 @@
         self.only_particle_type = only_particle_type
         self.do_grid_particles = do_grid_particles
         self.single_particle_mass = single_particle_mass
+        self.merge_dm_and_stars = merge_dm_and_stars
         self.spread = spread
         
         if limit_level is None:



https://bitbucket.org/yt_analysis/yt/changeset/8780c913832f/
changeset:   8780c913832f
branch:      yt
user:        Christopher Moody
date:        2012-10-12 22:14:11
summary:     Merging with yt_analysis/yt tip
affected #:  156 files

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.
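
The two guidelines touched here, importing NumPy as np and avoiding
needless copies, look like this in practice (a short sketch, not taken
from the styleguide itself):

    import numpy as np

    a = np.arange(12, dtype="float64")
    a.shape = (3, 4)        # reshape in place rather than a = a.reshape(3, 4)
    np.multiply(a, 3, a)    # multiply in place rather than a = a * 3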


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -220,11 +220,24 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -399,9 +412,8 @@
 # Now we dump all our SHA512 files out.
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -430,7 +442,7 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.1.0.tar.gz
 get_ytproject mercurial-2.2.2.tar.gz
@@ -555,11 +567,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -684,6 +696,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)
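
_deltaz_forward (and the deltaz_max/min routines above it) solve
distance(z2) = target_distance with a secant iteration: each pass
estimates the local slope m and moves z2 by (target - distance2)/m. A toy
illustration with a linear stand-in for the comoving distance, so a
single step lands exactly on the answer (illustration only):

    def secant_step(z1, d1, z2, d2, target):
        m = (d2 - d1) / (z2 - z1)      # local slope of distance vs. redshift
        return z2 + (target - d2) / m  # next guess for z2

    z, target = 1.0, 250.0
    d = lambda zz: 1000.0 * (zz - z)   # stand-in for ComovingRadialDistance
    z_next = secant_step(z, d(z), z + 0.5, d(z + 0.5), target)
    print(z_next)                      # -> 1.25, i.e. a delta z of 0.25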


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos if the box width fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,
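
The mask construction above builds a boolean (pixels, pixels) image and zeroes every pixel that a halo circle touches, after tiling and periodically wrapping the halo positions. Below is a standalone sketch of the central cut, simplified to pixel centers (the patched code also trims half a cell width so partially covered pixels are cut as well); the helper name is made up for the sketch and is not part of yt:

import numpy as np

def halo_pixel_mask(pixels, centers, radii):
    # Boolean mask that is False for any pixel whose center falls inside a
    # halo circle; centers and radii are in units of the image width.
    dx = 1.0 / pixels
    x = (np.arange(pixels) + 0.5) * dx        # pixel-center coordinates (square image)
    mask = np.ones((pixels, pixels), dtype=bool)
    for (cx, cy), r in zip(centers, radii):
        d = np.sqrt((x[:, None] - cx) ** 2 + (x[None, :] - cy) ** 2)
        mask &= (d >= r)
    return mask

mask = halo_pixel_mask(64, centers=[(0.25, 0.25), (0.7, 0.6)], radii=[0.1, 0.05])
print("%d of %d pixels survive the cut" % (mask.sum(), mask.size))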


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in an hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()
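
The solution setup above seeds np.random with a stored seed so that the whole stack of random projection axes and centers can be regenerated exactly, and reseeding with a new value re-randomizes it. A minimal sketch of that pattern on its own, outside the light cone machinery (the helper name is made up for illustration):

import numpy as np

def random_slice_geometry(seed, n_slices):
    # Reproducible (axis, center) choices per slice, mirroring the seeded
    # np.random.randint / np.random.random calls in the patch above.
    np.random.seed(int(seed))
    solution = []
    for _ in range(n_slices):
        axis = np.random.randint(0, 3)                    # project along x, y, or z
        center = [np.random.random() for _ in range(3)]   # random point in the unit box
        solution.append({"projection_axis": axis, "projection_center": center})
    return solution

# The same seed always reproduces the same geometry.
assert random_slice_geometry(123456789, 4) == random_slice_geometry(123456789, 4)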


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
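
The block above tiles the projection when the box width fraction exceeds one, then wraps off-edge cells back into the domain and duplicates cells that straddle an edge so the stitched image has no seams. A one-axis sketch of that wrap-and-duplicate step (the helper name and the single-axis simplification are mine, not the yt API):

import numpy as np

def wrap_cells(px, pdx, width):
    # Wrap 1-d cell centers px (cell widths pdx) back into [0, width) and
    # append shifted copies of cells that hang over either edge, echoing the
    # px/py handling in the patch above.
    px = np.asarray(px, dtype=float).copy()
    pdx = np.asarray(pdx, dtype=float)
    px[px < 0] += width
    px[px >= width] -= width
    right = px + 0.5 * pdx > width     # cells poking past the right edge
    left = px - 0.5 * pdx < 0          # cells poking past the left edge
    extra_px = np.concatenate([px[right] - width, px[left] + width])
    extra_dx = np.concatenate([pdx[right], pdx[left]])
    return np.concatenate([px, extra_px]), np.concatenate([pdx, extra_dx])

px, pdx = wrap_cells([0.02, 0.5, 0.99], [0.1, 0.1, 0.1], width=1.0)
print(px)   # the three original centers plus copies shifted across each edge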
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
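
To compare two solutions, each slice is turned into a (3, 2) bounding cube and the cubes are fed to common_volume with unit periodic bounds, accumulating the shared volume against the smaller total. A small sketch of the cube construction; the helper name is made up, and the depth-along-axis / width-elsewhere convention is read off the surrounding hunks rather than stated explicitly in them:

import numpy as np

def slice_cube(center, axis, width_fraction, depth_fraction):
    # (3, 2) bounding cube for one light cone slice: depth along the
    # projection axis, width along the remaining two axes.
    cube = np.zeros((3, 2))
    for w in range(3):
        half = 0.5 * (depth_fraction if w == axis else width_fraction)
        cube[w] = [center[w] - half, center[w] + half]
    return cube

cube1 = slice_cube([0.5, 0.5, 0.5], axis=2, width_fraction=0.4, depth_fraction=0.2)
cube2 = slice_cube([0.6, 0.5, 0.5], axis=2, width_fraction=0.4, depth_fraction=0.2)
# cube1 and cube2 can then go to common_volume(cube1, cube2,
# periodic=np.array([[0, 1]] * 3)) to measure how much the solutions share.
print(cube1)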
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate the length of the shortest vector between two points in a periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments
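
vector_length, periodic_distance, and periodic_ray above are plain module-level helpers, so they can be exercised directly; the import path below is the file patched here and assumes this yt branch is installed:

import numpy as np
from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
    vector_length, periodic_distance

start = np.array([0.9, 0.5, 0.5])
end = np.array([1.3, 0.5, 0.5])

# Plain Euclidean length of the segment.
print(vector_length(start, end))                              # -> 0.4
# Shortest separation in a unit periodic box: 0.2, not 0.8.
print(periodic_distance(start, np.array([0.1, 0.5, 0.5])))
# periodic_ray(start, end) would further split the segment into
# non-periodic pieces wherever it crosses a domain edge.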


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
         # neglecting to check if the 4 particles in the same plane,
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # This problem apparently happens sometimes: the NaNs are turned
         # into infs, which messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,

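The bulk-velocity and RMS-velocity passes above (and the CoM pass in
parallel_hop_interface.py further down) use the same sort-and-segment
pattern: sort the per-particle values by halo (subchain) ID, locate the
segment boundaries with ediff1d, and sum each slice. A minimal standalone
sketch of that pattern, with illustrative names rather than the yt
internals:

    import numpy as np

    def segment_sums(group_ids, values):
        # Sum rows of `values` per group ID via the argsort/ediff1d/marks trick.
        sort = group_ids.argsort()
        sorted_ids = group_ids[sort]
        sorted_vals = values[sort]
        uniq = np.unique(sorted_ids)
        # Indices where the group ID changes; each one opens a new segment.
        marks = np.nonzero(np.ediff1d(sorted_ids) > 0)[0] + 1
        marks = np.concatenate(([0], marks, [sorted_ids.size]))
        sums = np.empty((uniq.size,) + values.shape[1:], dtype=values.dtype)
        for i, u in enumerate(uniq):
            sums[i] = np.sum(sorted_vals[marks[i]:marks[i + 1]], axis=0)
        return uniq, sums

    # e.g. mass-weighted velocity sums for three toy halos:
    ids = np.array([2, 0, 1, 0, 2, 2])
    vel = np.arange(18, dtype='float64').reshape(6, 3)
    print(segment_sums(ids, vel))

In the diff above, marks is built once for the bulk-velocity pass and
reused for the RMS-velocity pass, so each quantity costs a single sweep
over the sorted particles.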

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,7 +25,7 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Setup the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region, but within one padding of the boundary,
         # and this will do it.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unf. this has to be a dict, because any task can have
@@ -449,10 +449,10 @@
         # as the full number of particles.
         # We can skip this the first two times around.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -502,9 +502,9 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] =np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the holes that are modulo mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     

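In the chunked densest-nearest-neighbor search above, the densest neighbor
of each particle is read off with an np.take / np.argmax pair over the
neighbor-tag table. A small self-contained sketch of that lookup under
assumed shapes (one density per particle, one row of neighbor indices per
particle); the names are illustrative, not yt's:

    import numpy as np

    def densest_neighbor(density, nn_tags):
        # Densities of each particle's neighbors (n_particles x n_neighbors).
        n_dens = np.take(density, nn_tags)
        # Column holding the densest neighbor in each row.
        max_loc = np.argmax(n_dens, axis=1)
        rows = np.arange(nn_tags.shape[0])
        return nn_tags[rows, max_loc]

    density = np.array([1.0, 5.0, 2.0, 0.5])
    nn_tags = np.array([[1, 2], [0, 2], [1, 3], [2, 0]])
    print(densest_neighbor(density, nn_tags))   # -> [1 2 1 2]

The explicit per-row loop in _densestNN performs the same selection as the
fancy-indexing line above.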

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) mass (Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1

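The integration helper at the end of halo_mass_function.py keeps pushing
the upper limit outward and refining a log-spaced grid until the trapezoid
estimate stops changing. A compact sketch of the same convergence loop
(simplified; the routine above also grows the point count as a power of
the initial guess):

    import numpy as np

    def integrate_to_inf(fcn, error=1e-5, upper=10.0, npts=1000):
        # Trapezoid-rule integral of fcn on [0, inf): extend the upper limit
        # and refine a log-spaced grid until successive estimates agree.
        last = None
        while True:
            x = np.logspace(0, np.log10(upper + 1.0), npts) - 1.0  # grid starts at 0
            y = fcn(x)
            # Trapezoid rule with non-uniform spacing.
            area = np.sum((y[1:] + y[:-1]) * np.diff(x) / 2.0)
            if last is not None and abs(area - last) < error:
                return area
            last = area
            upper *= 10
            npts *= 2

    print(integrate_to_inf(lambda x: np.exp(-x)))   # ~1.0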

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendent fractions and ancestory fractions. 
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me

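find_relative_parentage above reduces halo parentage to the size of the
intersection of two particle-ID arrays. A self-contained sketch of that
calculation with made-up particle IDs:

    import numpy as np

    def relative_parentage(parent_ids, child_ids):
        # Fraction of the child's particles that came from the parent, and
        # fraction of the parent's particles that ended up in the child.
        overlap = np.intersect1d(parent_ids, child_ids).size
        return overlap / float(child_ids.size), overlap / float(parent_ids.size)

    parent = np.array([1, 2, 3, 4, 5, 6])
    child = np.array([4, 5, 6, 7])
    print(relative_parentage(parent, child))   # -> (0.75, 0.5)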

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -174,7 +171,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
-            child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            child_points = np.array(child_points)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -400,7 +387,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This is allowing us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(np.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
+            else:
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(np.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
+        else:
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
-        child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
+        child_send = np.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
@@ -618,8 +620,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]
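[Editor's note] The hunks above swap the Fortran kD-tree for a cKDTree nearest-neighbor query. The sketch below shows the query pattern with the stock SciPy API; the `period=` keyword used in the diff appears to come from yt's bundled fork (yt.utilities.spatial) and is not part of standard SciPy. Names and values here are illustrative only.

    import numpy as np
    from scipy.spatial import cKDTree   # yt.utilities.spatial ships a fork that adds a `period` keyword

    # Hypothetical halo positions, already normalized to the unit box as in the diff.
    child_points = np.random.random((1000, 3))
    kdtree = cKDTree(child_points, leafsize=10)

    # Query the five nearest neighbors of one parent-halo position.
    query = np.array([0.5, 0.5, 0.5])
    distances, NNtags = kdtree.query(query, k=5)   # standard SciPy signature: (distances, indices)

The particle-level matching between parent and child halos is then a vectorized set-membership test, np.in1d(parent_IDs, child_IDs, assume_unique=True), as shown in the _match hunk above.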


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):
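[Editor's note] A minimal sketch of the reverse scan in virial_filter above: walk the overdensity profile from the outside in and stop at the outermost bin where it crosses the virial threshold. The profile values below are made up.

    import numpy as np

    # Made-up overdensity profile, decreasing outward; threshold of 200.
    overDensity = np.array([900., 500., 250., 180., 90.])
    virial_overdensity = 200.0

    index = None
    for q in (np.arange(len(overDensity), 0, -1) - 1):
        if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
            index = q - 1   # outermost bin still above the threshold
            break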


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -684,7 +684,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
                                                                                  lazy_reader=True)
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -845,7 +845,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1076,7 +1076,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1171,7 +1171,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1179,7 +1179,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1207,7 +1207,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1215,7 +1215,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1300,17 +1300,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --
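[Editor's note] A small sketch of the level bounding-box arithmetic used above when setting up the extracted hierarchy: take the element-wise min/max of the grid edges on the minimum level, and center the extraction on their midpoint by default. Edge values below are illustrative.

    import numpy as np

    # Illustrative left/right edges for two grids on the minimum level.
    left_edges  = np.array([[0.25, 0.25, 0.25], [0.30, 0.40, 0.25]])
    right_edges = np.array([[0.50, 0.50, 0.50], [0.60, 0.55, 0.45]])

    min_left  = np.min(left_edges, axis=0)    # element-wise minimum over grids
    max_right = np.max(right_edges, axis=0)   # element-wise maximum over grids
    offset = (max_right + min_left) / 2.0     # default centering offset, as above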


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 
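[Editor's note] A minimal sketch of why the guard added above is needed: clumps that have not had their children found yet keep children == None, and iterating over None raises a TypeError, so pass_down must return early. The class below is a toy stand-in, not the yt Clump class.

    class ToyClump(object):
        def __init__(self, children=None):
            self.children = children

        def pass_down(self, operation):
            exec(operation)
            if self.children is None:
                return                      # leaf clump: nothing to recurse into
            for child in self.children:
                child.pass_down(operation)

    root = ToyClump(children=[ToyClump(), ToyClump()])
    root.pass_down("_ = 1")                 # reaches the leaves without raising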


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modification.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]
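[Editor's note] The helper above orders clumps densest-first for contour plotting; a tiny sketch of the argsort-and-reverse idiom, with made-up densities.

    import numpy as np

    # Made-up minimum densities for three clumps.
    minDensity = np.array([1e-24, 5e-22, 3e-23])
    args = np.argsort(minDensity)     # indices in ascending order of minimum density
    densest_first = args[::-1]        # reversed -> clump with the highest minimum density first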


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",
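[Editor's note] A sketch of the per-grid seeding step in identify_contours above: every cell whose value lies inside the contour bounds gets a unique provisional id, counted down from cur_max_id, and everything else gets -1. The field below is a random placeholder.

    import numpy as np

    field = np.random.random((8, 8, 8))          # stand-in for grid[field]
    min_val, max_val = 0.2, 0.8
    cur_max_id = field.size

    local_ind = np.where((field > min_val) & (field < max_val))
    kk = np.arange(cur_max_id, cur_max_id - local_ind[0].size, -1)
    tempContours = np.ones(field.shape, dtype='int64') * -1
    tempContours[local_ind] = kk                 # unique provisional contour ids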


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation!
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]
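[Editor's note] A minimal sketch of the radial part of _interpolate_value above: np.digitize places each query radius between two surface radii, and the column density is interpolated linearly in r between the values on those surfaces. The angular interpolation is omitted here and all numbers are made up.

    import numpy as np

    bins = np.linspace(0.01, 1.0, 10)        # surface radii
    r = np.array([0.05, 0.30, 0.70])         # query radii
    digi = np.digitize(r, bins)              # index of the bounding outer surface

    in_r,  out_r  = bins[digi - 1], bins[digi]
    in_val  = np.array([2.0, 5.0, 9.0])      # made-up values on the inner surfaces
    out_val = np.array([3.0, 6.0, 10.0])     # made-up values on the outer surfaces

    val = (out_val - in_val) / (out_r - in_r) * (r - in_r) + in_val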


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
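[Editor's note] A rough sketch of the band integration in _get_interpolator above: np.digitize picks the energy samples bracketing the requested band and np.trapz integrates the emissivity table along the energy axis, then the result is clipped and logged. Shapes and values below are made up, and the eV-to-Hz conversion factor used in the real code is omitted.

    import numpy as np

    ev_vals = np.logspace(-1, 2, 100)              # made-up energy grid
    table = np.random.random((20, 30, 100))        # (density, temperature, energy) bins

    ev_min, ev_max = 0.5, 2.0
    e_is, e_ie = np.digitize([ev_min, ev_max], ev_vals)
    bin_table = np.trapz(table[..., e_is-1:e_ie],
                         ev_vals[e_is-1:e_ie], axis=-1)
    bin_table = np.log10(bin_table.clip(1e-80, bin_table.max()))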


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if len(sub) == 0: return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.
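[Editor's note] A small sketch of the time-binning used above: star creation times are digitized into time bins, the mass formed in each bin is summed, and a cumulative sum gives the total mass formed up to each time. All numbers are made up and the unit conversions of the real code are omitted.

    import numpy as np

    bin_count = 5
    ct_stars   = np.array([0.10, 0.15, 0.30, 0.42, 0.45])   # creation times (code units)
    mass_stars = np.array([1e5,  2e5,  5e4,  3e5,  1e5])    # Msun

    current_time = 0.5
    time_bins = np.linspace(ct_stars.min() * 0.99, current_time, num=bin_count + 1)
    inds = np.digitize(ct_stars, time_bins) - 1              # bin index for each star

    mass_bins = np.zeros(bin_count + 1, dtype='float64')
    for index in np.unique(inds):
        mass_bins[index] += mass_stars[inds == index].sum()
    cum_mass_bins = np.cumsum(mass_bins)                     # cumulative mass formed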


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -76,14 +76,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -151,7 +151,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -176,7 +176,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -209,7 +209,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -218,13 +218,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -244,11 +244,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -334,7 +334,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -342,7 +342,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + na.array(vertex)*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
 
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
@@ -383,17 +383,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -404,7 +404,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -413,7 +413,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -439,12 +439,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and
@@ -452,41 +452,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until we span nwide cells
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -502,15 +502,15 @@
     #make sure we select more than a single particle
     assert na.sum(idxst)>0
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idxst & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
-    assert na.sum(idx)>0
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -531,8 +531,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -546,7 +546,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -579,7 +579,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -707,14 +707,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -731,14 +731,14 @@
     if scene_distance is  None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -754,11 +754,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -772,11 +772,11 @@
     use this to multiply two matrices, it will think that you're
     trying to multiply by a set of vectors and all hell will break
     loose."""    
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -784,14 +784,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -800,25 +800,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -835,7 +835,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the x (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -852,7 +852,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -860,19 +860,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -921,9 +921,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(angle),0.,np.sin(angle)]
+    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
     cameraset_ring['02i'%angle]=(pos,vc)
             
 


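A minimal standalone sketch of the ZXZ Euler convention that the helpers converted above (euler_matrix, matmul, mag) rely on, assuming only numpy; the angles and the test vector below are arbitrary illustration values, not from the commit:

    import numpy as np

    def rot_z(a):
        c, s = np.cos(a), np.sin(a)
        return np.array([[ c,  s, 0.],
                         [-s,  c, 0.],
                         [0., 0., 1.]])

    def rot_x(a):
        c, s = np.cos(a), np.sin(a)
        return np.array([[1., 0., 0.],
                         [0.,  c,  s],
                         [0., -s,  c]])

    phi, the, psi = 0.3, 1.1, -0.7
    # ZXZ composition: rotate about z by phi, about x by the, about z by psi.
    R = np.dot(rot_z(psi), np.dot(rot_x(the), rot_z(phi)))

    # The first row matches the m[0,*] entries visible in the hunk above.
    assert np.allclose(R[0, 0],
                       np.cos(psi)*np.cos(phi) - np.cos(the)*np.sin(phi)*np.sin(psi))
    # A proper rotation is orthogonal and preserves vector norms.
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    v = np.array([1.0, 2.0, 3.0])
    assert np.allclose(np.linalg.norm(np.dot(R, v)), np.linalg.norm(v))
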
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Reflect so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is what is effectively happening below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_edges is either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
         multi = 1
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, this finds the sum over all the
         elements leaving the dimension dim untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi


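A standalone sketch of the pair construction used by the two-point functions module above: phi drawn uniformly on [0, 2*pi) and theta = arccos(2v - 1) give directions uniformly distributed on the unit sphere, so every pair is separated by exactly `length` before the periodic wrap. The size, length, and period values here are arbitrary illustration values:

    import numpy as np

    rng = np.random.mtrand.RandomState(1234)
    size, length = 1000, 0.05
    period = np.array([1.0, 1.0, 1.0])

    # First point of each pair: uniform inside the (unit) volume.
    r1 = rng.uniform(low=0.0, high=1.0, size=(size, 3))
    # Direction: phi uniform, theta = arccos(2v - 1) -> uniform on the sphere.
    phi = rng.uniform(low=0.0, high=2.0 * np.pi, size=size)
    theta = np.arccos(2.0 * rng.uniform(size=size) - 1.0)
    r2 = np.empty_like(r1)
    r2[:, 0] = r1[:, 0] + length * np.cos(phi) * np.sin(theta)
    r2[:, 1] = r1[:, 1] + length * np.sin(phi) * np.sin(theta)
    r2[:, 2] = r1[:, 2] + length * np.cos(theta)

    # Every pair is separated by exactly `length` before the wrap.
    assert np.allclose(np.sqrt(((r2 - r1) ** 2).sum(axis=1)), length)
    r2 %= period   # reflect back into the periodic volume, as in the module
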
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)


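A minimal sketch of the record-array pattern used in yt/arraytypes.py above: one zeroed column per format, assembled with numpy.core.records.fromarrays. The field names and formats here are invented for illustration:

    import numpy as np
    import numpy.core.records as rec

    desc = {'names': ['id', 'mass'], 'formats': ['int64', 'float64']}
    elements = 4
    # One zeroed column per format, then assemble them into a record array.
    blanks = [np.zeros(elements, dtype=atype) for atype in desc['formats']]
    table = rec.fromarrays(blanks, **desc)
    print(table['mass'])   # four zeros, addressable by field name
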
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',
@@ -51,6 +52,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',


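A hedged sketch of how the new string defaults above would be read back, assuming the usual yt 2.x ytcfg ConfigParser accessor and the "yt" section name; the surrounding logic is illustrative only:

    from yt.config import ytcfg

    # The defaults added above are plain strings, so they come back through the
    # standard ConfigParser accessors; "yt" is the usual section name.
    if ytcfg.getboolean("yt", "reconstruct_hierarchy"):
        # e.g. rebuild the hierarchy from the data files instead of a cached copy
        pass
    within_testing = ytcfg.getboolean("yt", "__withintesting")
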
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -65,6 +65,9 @@
     quantity_info, \
     add_quantity
 
+from image_array import \
+    ImageArray
+
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \


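A hedged sketch of the newly exported ImageArray, assuming (as elsewhere in this changeset series) that it is an ndarray subclass; the shape used and the write_png call are assumptions for illustration, not shown in this hunk:

    import numpy as np
    from yt.data_objects.api import ImageArray

    # ImageArray is assumed to behave like a plain ndarray for numpy operations.
    im = ImageArray(np.zeros((64, 64, 4)))
    im[:, :, 3] = 1.0              # e.g. set the alpha channel
    # im.write_png("blank.png")    # convenience output, if the method is available
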
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -29,7 +29,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -74,9 +74,9 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_grid_state(func):
     """
@@ -181,13 +181,13 @@
         if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tempx = na.abs(self['x'] - center[0])
-                tempx = na.minimum(tempx, self.DW[0] - tempx)
-                tempy = na.abs(self['y'] - center[1])
-                tempy = na.minimum(tempy, self.DW[1] - tempy)
-                tempz = na.abs(self['z'] - center[2])
-                tempz = na.minimum(tempz, self.DW[2] - tempz)
-                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
+                tempx = np.abs(self['x'] - center[0])
+                tempx = np.minimum(tempx, self.DW[0] - tempx)
+                tempy = np.abs(self['y'] - center[1])
+                tempy = np.minimum(tempy, self.DW[1] - tempy)
+                tempz = np.abs(self['z'] - center[2])
+                tempz = np.minimum(tempz, self.DW[2] - tempz)
+                tr = np.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.field_data[field]
@@ -235,14 +235,14 @@
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -250,7 +250,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -376,7 +376,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -421,11 +421,11 @@
         return grids
 
     def select_grid_indices(self, level):
-        return na.where(self.grid_levels == level)
+        return np.where(self.grid_levels == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
-            self.__grid_left_edge = na.array([g.LeftEdge for g in self._grids])
+            self.__grid_left_edge = np.array([g.LeftEdge for g in self._grids])
         return self.__grid_left_edge
 
     def __del_grid_left_edge(self):
@@ -441,7 +441,7 @@
 
     def __get_grid_right_edge(self):
         if self.__grid_right_edge == None:
-            self.__grid_right_edge = na.array([g.RightEdge for g in self._grids])
+            self.__grid_right_edge = np.array([g.RightEdge for g in self._grids])
         return self.__grid_right_edge
 
     def __del_grid_right_edge(self):
@@ -457,7 +457,7 @@
 
     def __get_grid_levels(self):
         if self.__grid_levels == None:
-            self.__grid_levels = na.array([g.Level for g in self._grids])
+            self.__grid_levels = np.array([g.Level for g in self._grids])
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +474,7 @@
 
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
-            self.__grid_dimensions = na.array([g.ActiveDimensions for g in self._grids])
+            self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
         return self.__grid_dimensions
 
     def __del_grid_dimensions(self):
@@ -516,13 +516,13 @@
             if field not in self.hierarchy.field_list and not in_grids:
                 if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
             if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
-                self._sortkey = na.argsort(self[self.sort_by])
+                self._sortkey = np.argsort(self[self.sort_by])
             # We *always* sort the field here if we have not successfully
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
@@ -581,7 +581,7 @@
 
     def _get_list_of_grids(self):
         # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+        y = np.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
                     & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
                     & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
                     & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
@@ -604,10 +604,10 @@
         else:
             sl = self._cut_masks[grid.id]
         if not iterable(grid[field]):
-            gf = grid[field] * na.ones(grid.child_mask[sl].shape)
+            gf = grid[field] * np.ones(grid.child_mask[sl].shape)
         else:
             gf = grid[field][sl]
-        return gf[na.where(grid.child_mask[sl])]
+        return gf[np.where(grid.child_mask[sl])]
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
@@ -646,10 +646,10 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
@@ -659,7 +659,7 @@
         # Get the value of the line at each LeftEdge and RightEdge
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        p = np.zeros(self.pf.h.num_grids, dtype='bool')
         # Check left faces first
         for i in range(3):
             i1 = (i+1) % 3
@@ -670,10 +670,10 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
+        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+                & np.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+                & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
     def _get_line_at_coord(self, v, index):
@@ -684,24 +684,24 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                        grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        self._dts[grid.id] = np.abs(dts)
+        self._ts[grid.id] = np.abs(ts)
         return mask
 
 class AMRStreamlineBase(AMR1DData):
@@ -745,11 +745,11 @@
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-1]
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -760,14 +760,14 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
@@ -775,13 +775,13 @@
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1) 
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -842,8 +842,8 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = na.array([])
-            else: data = na.concatenate(data)
+            if len(data) == 0: data = np.array([])
+            else: data = np.concatenate(data)
             temp_data[field] = data
             # Now the next field can use this field
             self[field] = temp_data[field] 
@@ -855,6 +855,22 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
+    def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
+        axis = self.axis
+        if fields == None:
+            if self.fields == None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+                         plot_type=plot_type)
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
@@ -891,7 +907,7 @@
 
         >>> proj = pf.h.proj(0, "Density")
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         if center is None:
             center = self.get_field_parameter("center")
@@ -916,26 +932,6 @@
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-    def to_pw(self):
-        r"""Create a :class:`~yt.visualization.plot_window.PlotWindow` from this
-        object.
-
-        This is a bare-bones mechanism of creating a plot window from this
-        object, which can then be moved around, zoomed, and on and on.  All
-        behavior of the plot window is relegated to that routine.
-        """
-        axis = self.axis
-        center = self.get_field_parameter("center")
-        if center is None:
-            center = (self.pf.domain_right_edge
-                    + self.pf.domain_left_edge)/2.0
-        width = (1.0, 'unitary')
-        from yt.visualization.plot_window import \
-            PWViewerMPL, GetBoundsAndCenter
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
-        pw = PWViewerMPL(self, bounds)
-        return pw
-
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
         """
         This returns a uniform grid of points between *LE* and *RE*,
@@ -944,11 +940,11 @@
         """
         import yt.utilities.delaunay as de
         if log_spacing:
-            zz = na.log10(self[field])
+            zz = np.log10(self[field])
         else:
             zz = self[field]
-        xi, yi = na.array( \
-                 na.mgrid[LE[0]:RE[0]:side*1j, \
+        xi, yi = np.array( \
+                 np.mgrid[LE[0]:RE[0]:side*1j, \
                           LE[1]:RE[1]:side*1j], 'float64')
         zi = de.Triangulation(self['px'],self['py']).nn_interpolator(zz)\
                  [LE[0]:RE[0]:side*1j, \
@@ -1082,7 +1078,7 @@
             points = None
             t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
-            points = na.concatenate(points)
+            points = np.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
@@ -1124,27 +1120,27 @@
         nx = grid.child_mask.shape[xaxis]
         ny = grid.child_mask.shape[yaxis]
         mask = self.__cut_mask_child_mask(grid)[sl]
-        cm = na.where(mask.ravel()== 1)
-        cmI = na.indices((nx,ny))
+        cm = np.where(mask.ravel()== 1)
+        cmI = np.indices((nx,ny))
         ind = cmI[0, :].ravel()   # xind
         npoints = cm[0].shape
         # create array of "npoints" ones that will be reused later
-        points = na.ones(npoints, 'float64')
+        points = np.ones(npoints, 'float64')
         # calculate xpoints array
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
         del cmI   # no longer needed 
-        t = na.vstack( (t, points * ind[cm] * dy + \
+        t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
         del ind, cm   # no longer needed
         # calculate zpoints array
-        t = na.vstack((t, points * self.coord))
+        t = np.vstack((t, points * self.coord))
         # calculate dx array
-        t = na.vstack((t, points * dx * 0.5))
+        t = np.vstack((t, points * dx * 0.5))
         # calculate dy array
-        t = na.vstack((t, points * dy * 0.5))
+        t = np.vstack((t, points * dy * 0.5))
         # return [xpoints, ypoints, zpoints, dx, dy] as (5, npoints) array
         return t.swapaxes(0, 1)
 
@@ -1169,7 +1165,7 @@
             dv = self.hierarchy.io._read_data_slice(grid, field, self.axis, sl_ind) * conv_factor
         else:
             dv = grid[field]
-            if dv.size == 1: dv = na.ones(grid.ActiveDimensions)*dv
+            if dv.size == 1: dv = np.ones(grid.ActiveDimensions)*dv
             dv = dv[sl]
         mask = self.__cut_mask_child_mask(grid)[sl]
         dataVals = dv.ravel()[mask.ravel() == 1]
@@ -1193,6 +1189,18 @@
     def hub_upload(self):
         self._mrep.upload()
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Slice')
+        return pw
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1251,11 +1259,11 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
@@ -1276,7 +1284,7 @@
         # @todo: Convert to using corners
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+        vertices = np.array([[LE[:,0],LE[:,1],LE[:,2]],
                              [RE[:,0],RE[:,1],RE[:,2]],
                              [LE[:,0],LE[:,1],RE[:,2]],
                              [RE[:,0],RE[:,1],LE[:,2]],
@@ -1285,27 +1293,27 @@
                              [LE[:,0],RE[:,1],LE[:,2]],
                              [RE[:,0],LE[:,1],RE[:,2]]])
         # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
         self.D = D
         self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+            np.where(np.logical_not(np.all(D<0,axis=0) | np.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
-        D = na.ones(ss) * self._d
+        D = np.ones(ss) * self._d
         x = grid.LeftEdge[0] + grid.dds[0] * \
-                (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
         y = grid.LeftEdge[1] + grid.dds[1] * \
-                (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
         z = grid.LeftEdge[2] + grid.dds[2] * \
-                (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
-        cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
+        diag_dist = np.sqrt(np.sum(grid.dds**2.0))
+        cm = (np.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
 
     def _generate_coords(self):
@@ -1313,12 +1321,12 @@
         for grid in self._get_grids():
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
-        else: points = na.concatenate(points)
+        else: points = np.concatenate(points)
         t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
-        self['px'] = na.dot(pos, self._x_vec)
-        self['py'] = na.dot(pos, self._y_vec)
-        self['pz'] = na.dot(pos, self._norm_vec)
+        self['px'] = np.dot(pos, self._x_vec)
+        self['py'] = np.dot(pos, self._y_vec)
+        self['pz'] = np.dot(pos, self._norm_vec)
         self['pdx'] = t[:,3] * 0.5
         self['pdy'] = t[:,3] * 0.5
         self['pdz'] = t[:,3] * 0.5
@@ -1326,14 +1334,14 @@
     def _generate_grid_coords(self, grid):
         pointI = self._get_point_indices(grid)
         coords = [grid[ax][pointI].ravel() for ax in 'xyz']
-        coords.append(na.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
-        return na.array(coords).swapaxes(0,1)
+        coords.append(np.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
+        return np.array(coords).swapaxes(0,1)
 
     def _get_data_from_grid(self, grid, field):
         if not self.pf.field_info[field].particle_type:
             pointI = self._get_point_indices(grid)
             if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions)
+                t = grid[field] * np.ones(grid.ActiveDimensions)
                 return t[pointI].ravel()
             return grid[field][pointI].ravel()
         else:
@@ -1344,10 +1352,10 @@
 
     @cache_point_indices
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _gen_node_name(self):
         cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
@@ -1355,6 +1363,30 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        normal = self.normal
+        center = self.center
+        if fields == None:
+            if self.fields == None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetOffAxisBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
+                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
@@ -1391,7 +1423,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -1435,34 +1467,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
         
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is False:
             self._refresh_data()
@@ -1479,11 +1511,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -1491,12 +1523,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -1512,7 +1544,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -1538,27 +1570,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         self._refresh_data()
         return
@@ -1584,7 +1616,7 @@
                     continue # A "True" return means we did it
             if not self._vc_data.has_key(field):
                 self._vc_data[field] = {}
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
@@ -1686,9 +1718,9 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
@@ -1743,7 +1775,7 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_dls(self, grid, fields):
@@ -1755,13 +1787,25 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
+        dls = np.array(dls)
+        convs = np.array(convs)
         if self.proj_style == "mip":
             dls[:] = 1.0
             convs[:] = 1.0
         return dls, convs
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be moved around, zoomed, and so on.  All
+        behavior of the plot window is delegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
@@ -1822,14 +1866,14 @@
                 ds = gs[0].dds[0]
             else:
                 ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
+            dxs.append(np.ones(nvals.shape[0], dtype='float64') * ds)
+        coord_data = np.concatenate(coord_data, axis=0).transpose()
+        field_data = np.concatenate(field_data, axis=0).transpose()
         if self._weight is None:
             dls, convs = self._get_dls(self._grids[0], fields)
             field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        weight_data = np.concatenate(weight_data, axis=0).transpose()
+        dxs = np.concatenate(dxs, axis=0).transpose()
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = dxs
@@ -1843,7 +1887,7 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -1853,7 +1897,7 @@
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
         if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -1873,16 +1917,16 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            used_points = np.logical_or.reduce(used_data, self.axis)
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
+                      for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
+        to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 
@@ -1894,8 +1938,8 @@
         if len(grids_to_initialize) == 0: return
         pbar = get_pbar('Initializing tree % 2i / % 2i' \
                           % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
+        start_index = np.empty(2, dtype="int64")
+        dims = np.empty(2, dtype="int64")
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
         for pi, grid in enumerate(grids_to_initialize):
@@ -1920,7 +1964,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2024,7 +2068,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -2083,7 +2127,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -2112,12 +2156,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -2142,7 +2186,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -2169,8 +2213,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids cause this
                     # to round up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -2213,8 +2257,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
@@ -2225,10 +2269,10 @@
                 del self.__overlap_masks[grid.id]
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -2244,7 +2288,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -2254,13 +2298,25 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism for creating a plot window from this
+        object, which can then be moved around, zoomed, and so on.  All
+        behavior of the plot window is delegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def _project_grid(self, grid, fields, zero_out):
         # We split this next bit into two sections to try to limit the IO load
         # on the system.  This way, we preserve grid state (@restore_grid_state
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -2278,18 +2334,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -2300,7 +2356,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2367,30 +2423,30 @@
         >>> print fproj["Density"]
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
 
     def _generate_coords(self):
@@ -2398,9 +2454,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
@@ -2422,7 +2478,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)
@@ -2483,10 +2539,10 @@
             if ( (i%100) == 0):
                 mylog.info("Working on % 7i / % 7i", i, len(self._grids))
             grid.set_field_parameter("center", self.center)
-            points.append((na.ones(
+            points.append((np.ones(
                 grid.ActiveDimensions,dtype='float64')*grid['dx'])\
                     [self._get_point_indices(grid)])
-            t = na.concatenate([t,points])
+            t = np.concatenate([t,points])
             del points
         self['dx'] = t
         #self['dy'] = t
@@ -2496,8 +2552,8 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
-        tr = na.array([grid['x'][pointI].ravel(), \
+        dx = np.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
+        tr = np.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
                 grid["RadiusCode"][pointI].ravel(),
@@ -2533,7 +2589,7 @@
                 if self._generate_field(field):
                     continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
@@ -2545,21 +2601,21 @@
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
             # int64 -> float64 with the first real set of data
-            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
-                return na.array([f[i,:][pointI] for i in range(3)])
+                return np.array([f[i,:][pointI] for i in range(3)])
             if self._is_fully_enclosed(grid): return grid[field].ravel()
             return grid[field][pointI].ravel()
         if field in self.pf.field_info and self.pf.field_info[field].vector_field:
             pointI = self._get_point_indices(grid)
             f = grid[field]
-            return na.array([f[i,:][pointI] for i in range(3)])
+            return np.array([f[i,:][pointI] for i in range(3)])
         else:
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
-                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+                tr = tr * np.ones(grid.ActiveDimensions, dtype='float64')
             if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
                 and self._is_fully_enclosed(grid):
                 return tr.ravel()
@@ -2579,19 +2635,19 @@
             if grid.has_key(field):
                 new_field = grid[field]
             else:
-                new_field = na.ones(grid.ActiveDimensions, dtype=dtype) * default_val
+                new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
             new_field[pointI] = self[field][i:i+np]
             grid[field] = new_field
             i += np
 
     def _is_fully_enclosed(self, grid):
-        return na.all(self._get_cut_mask)
+        return np.all(self._get_cut_mask)
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _get_cut_particle_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -2600,9 +2656,9 @@
         return self._get_cut_mask(fake_grid)
 
     def _get_particle_indices(self, grid):
-        k = na.zeros(grid.NumberOfParticles, dtype='bool')
+        k = np.zeros(grid.NumberOfParticles, dtype='bool')
         k = (k | self._get_cut_particle_mask(grid))
-        return na.where(k)
+        return np.where(k)
 
     def cut_region(self, field_cuts):
         """
@@ -2705,16 +2761,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -2818,7 +2874,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -2835,10 +2891,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -2867,7 +2923,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -2951,36 +3007,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
 
@@ -2992,16 +3048,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -3018,7 +3074,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -3056,7 +3112,7 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -3076,35 +3132,35 @@
         within the cylinder will be selected.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._refresh_data()
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3115,13 +3171,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
 
@@ -3138,8 +3194,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
@@ -3150,11 +3206,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -3172,7 +3228,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
             
 
@@ -3185,7 +3241,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge, 
                               grid.RightEdge, grid.dds, pm, 0)
@@ -3228,7 +3284,7 @@
                                                            self.right_edge)
 
     def _is_fully_enclosed(self, grid):
-        return na.all( (grid._corners <= self.right_edge)
+        return np.all( (grid._corners <= self.right_edge)
                      & (grid._corners >= self.left_edge))
 
     @cache_mask
@@ -3282,10 +3338,10 @@
 
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
         self._refresh_data()
-        self.offsets = (na.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
+        self.offsets = (np.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
                         (self.pf.domain_right_edge -
                          self.pf.domain_left_edge)[:,None,None,None])\
                        .transpose().reshape(27,3) # cached and in order
@@ -3300,7 +3356,7 @@
                            self.left_edge[1]+off_y,self.left_edge[2]+off_z]
             region_right = [self.right_edge[0]+off_x,
                             self.right_edge[1]+off_y,self.right_edge[2]+off_z]
-            if (na.all((grid._corners <= region_right) &
+            if (np.all((grid._corners <= region_right) &
                        (grid._corners >= region_left))):
                 return True
         return False
@@ -3310,7 +3366,7 @@
         if self._is_fully_enclosed(grid):
             return True
         else:
-            cm = na.zeros(grid.ActiveDimensions,dtype='bool')
+            cm = np.zeros(grid.ActiveDimensions,dtype='bool')
             dxp, dyp, dzp = self._dx_pad * grid.dds
             for off_x, off_y, off_z in self.offsets:
                 cm = cm | ( (grid['x'] - dxp + off_x < self.right_edge[0])
@@ -3350,7 +3406,7 @@
         Child cells are not returned.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._grids = na.array(grid_list)
+        self._grids = np.array(grid_list)
         self.grid_list = self._grids
 
     def _get_list_of_grids(self):
@@ -3361,13 +3417,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class AMRMaxLevelCollection(AMR3DData):
@@ -3394,13 +3450,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 
@@ -3441,14 +3497,14 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = na.abs(grid._corners - self.center)
-        r = na.minimum(r, na.abs(self.DW[None,:]-r))
-        corner_radius = na.sqrt((r**2.0).sum(axis=1))
-        return na.all(corner_radius <= self.radius)
+        r = np.abs(grid._corners - self.center)
+        r = np.minimum(r, np.abs(self.DW[None,:]-r))
+        corner_radius = np.sqrt((r**2.0).sum(axis=1))
+        return np.all(corner_radius <= self.radius)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3477,7 +3533,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -3488,12 +3544,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -3505,7 +3561,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -3535,7 +3591,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -3545,18 +3601,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # these vrdote# finds the product of vr components with e#
         # square the results
         # find the smallest
         # sums it
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
 
@@ -3572,21 +3628,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take into account non-cube root grid tiles
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
@@ -3602,7 +3658,7 @@
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -3627,22 +3683,23 @@
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -3650,14 +3707,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields=None):
         if self._grids is None:
@@ -3677,7 +3734,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -3689,9 +3746,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
             
@@ -3737,7 +3794,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -3832,7 +3889,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -3846,35 +3903,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -3944,7 +4001,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
     
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -3969,7 +4026,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -3997,7 +4054,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -4054,13 +4111,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
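
Aside from the two new to_pw() convenience methods, the data_containers.py
changes above are a mechanical rename of the numpy import alias from na to
np.  A minimal, hypothetical sketch of how such a rename could be scripted
follows (the na_to_np.py name, the regexes, and the in-place rewrite are
illustrative assumptions, not the tooling actually used for this changeset):

    # na_to_np.py -- hypothetical helper, not part of the changeset.
    # Rewrites "import numpy as na" and subsequent "na." call sites to "np".
    # The word-boundary regex is a rough heuristic: attribute accesses such
    # as "obj.na.x" would also match, so the results should be reviewed.
    import re
    import sys

    IMPORT_RE = re.compile(r'^import numpy as na\b', re.MULTILINE)
    USAGE_RE = re.compile(r'\bna\.')

    def migrate(source):
        source = IMPORT_RE.sub('import numpy as np', source)
        return USAGE_RE.sub('np.', source)

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            with open(path) as fh:
                text = fh.read()
            with open(path, 'w') as fh:
                fh.write(migrate(text))

The to_pw() additions, by contrast, simply defer to the shared _get_pw()
helper with a 'Projection' plot type, so something like
pf.h.proj(0, "Density").to_pw() (dataset and field names hypothetical)
should return a PWViewerMPL that can then be saved or adjusted like any
other plot window.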


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -100,7 +100,7 @@
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): self.retvals[i].append(rv[i])
             g.clear_data()
-        self.retvals = [na.array(self.retvals[i]) for i in range(self.n_ret)]
+        self.retvals = [np.array(self.retvals[i]) for i in range(self.n_ret)]
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
@@ -110,7 +110,7 @@
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
@@ -185,7 +185,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
 
@@ -218,7 +218,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -249,9 +249,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -268,17 +268,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -292,11 +292,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -360,15 +360,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -380,7 +380,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -402,26 +402,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Fix the coverage to match to root grid cell left 
         # edges for making indexes.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -429,12 +429,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -447,9 +447,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = na.array([local_data["CellMass"][sel]], order='F')
+	    vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -484,7 +484,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -492,7 +492,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -569,7 +569,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -613,9 +613,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -644,14 +644,14 @@
     """
     ma, maxi, mx, my, mz, mg = -1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
         mg = data["GridIndices"][maxi]
     return (ma, maxi, mx, my, mz, mg)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 6)
@@ -663,14 +663,14 @@
     """
     ma, mini, mx, my, mz, mg = 1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
         mg = data["GridIndices"][mini]
     return (ma, mini, mx, my, mz, mg)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 6)
@@ -691,8 +691,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
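
For context, the derived quantities registered above are reached through the
.quantities dictionary on any yt data object, and the na -> np rename does not
change that calling convention.  A minimal sketch, assuming a hypothetical
dataset path:

    from yt.mods import *
    pf = load("DD0010/DD0010")                  # hypothetical dataset
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    # Each quantity below is registered via add_quantity() in this file.
    mi, ma = sp.quantities["Extrema"](["Density"])[0]
    com = sp.quantities["CenterOfMass"]()
    total_mass = sp.quantities["TotalQuantity"](["CellMassMsun"])[0]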


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -151,8 +151,8 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -161,8 +161,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -180,12 +180,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -215,13 +215,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
-            return na.random.random(3) * 1e-2
+        if param in ['bulk_velocity', 'center', 'normal']:
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -79,11 +79,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -184,15 +184,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -209,14 +209,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -237,9 +237,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -278,19 +278,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -366,11 +366,13 @@
         self._child_index_mask = None
 
     #@time_execution
-    def __fill_child_mask(self, child, mask, tofill):
+    def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
         rf = self.pf.refine_by
+        if dlevel != 1:
+            rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -383,12 +385,12 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'int32')
+        self._child_mask = np.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
             for sibling in self.OverlappingSiblings:
-                self.__fill_child_mask(sibling, self._child_mask, 0)
+                self.__fill_child_mask(sibling, self._child_mask, 0, 0)
         
         self._child_indices = (self._child_mask==0) # bool, possibly redundant
 
@@ -398,7 +400,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -425,8 +427,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -462,7 +464,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -474,9 +476,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -486,17 +488,17 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
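
As a quick illustration of the vertex-centered buffer returned by
get_vertex_centered_data above, a sketch against a generic dataset (the
dataset path, grid index, and field name are placeholders):

    from yt.mods import *
    pf = load("DD0010/DD0010")        # hypothetical dataset
    grid = pf.h.grids[0]
    # The buffer has one extra sample along each axis: a 16^3 grid of
    # cell-centered values yields a 17^3 array of corner averages.
    vc = grid.get_vertex_centered_data("Density", smoothed=True)
    print grid.ActiveDimensions, "->", vc.shape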


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle, pdb
 import weakref
 
@@ -116,11 +116,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _setup_classes(self, dd):
         # Called by subclass
@@ -172,7 +172,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -361,13 +361,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/image_array.py
--- /dev/null
+++ b/yt/data_objects/image_array.py
@@ -0,0 +1,271 @@
+"""
+ImageArray Class
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+    Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  """
+
+import numpy as np
+import h5py as h5
+from yt.visualization.image_writer import write_bitmap, write_image
+
+class ImageArray(np.ndarray):
+    r"""A custom Numpy ndarray used for images.
+
+    This differs from ndarray in that you can optionally specify an
+    info dictionary which is used later in saving, and can be accessed with
+    ImageArray.info.
+
+    Parameters
+    ----------
+    input_array: array_like
+        A numpy ndarray, or list. 
+
+    Other Parameters
+    ----------------
+    info: dictionary
+        Contains information to be stored with image.
+
+    Returns
+    -------
+    obj: ImageArray object 
+
+    Raises
+    ------
+    None
+
+    See Also
+    --------
+    numpy.ndarray : Inherits
+
+    Notes
+    -----
+
+    References
+    ----------
+
+    Examples
+    --------
+    These are written in doctest format and illustrate basic use of the class.
+
+    >>> im = np.zeros([64,128,3])
+    >>> for i in xrange(im.shape[0]):
+    >>>     for k in xrange(im.shape[2]):
+    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    >>> im_arr = ImageArray(im, info=myinfo)
+    >>> im_arr.save('test_ImageArray')
+
+    Numpy ndarray documentation appended:
+
+    """
+    def __new__(cls, input_array, info=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        if info is None:
+            info = {}
+        obj.info = info
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # see InfoArray.__array_finalize__ for comments
+        if obj is None: return
+        self.info = getattr(obj, 'info', None)
+
+    def write_hdf5(self, filename):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: the filename is used as given; no extension is appended.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_hdf5('test_ImageArray.h5')
+
+        """
+        array_name = self.info.get("name","image")
+
+        f = h5.File(filename)
+        if array_name in f.keys():
+            del f[array_name]
+        d = f.create_dataset(array_name, data=self)
+        for k, v in self.info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    def write_png(self, filename, clip_ratio=None):
+        r"""Writes ImageArray to png file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: '.png' is appended if the filename does not already end in it.
+        clip_ratio: float, optional
+            If supplied, the image is clipped at clip_ratio times its standard deviation.
+       
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_png('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if clip_ratio is not None:
+            return write_bitmap(self.swapaxes(0, 1), filename,
+                                clip_ratio * self.std())
+        else:
+            return write_bitmap(self.swapaxes(0, 1), filename)
+
+    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+        r"""Writes a single channel of the ImageArray to a png file.
+
+        Parameters
+        ----------
+        filename: string
+            Note: '.png' is appended if the filename does not already end in it.
+       
+        Other Parameters
+        ----------------
+        channel : int, optional
+            Which channel to write out as an image.  If None (the default),
+            the full buffer is written.
+        color_bounds : tuple of floats, optional
+            The min and max to scale between.  Outlying values will be clipped.
+        cmap_name : string, optional
+            An acceptable colormap.  See either yt.visualization.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+        func : function, optional
+            A function to transform the buffer before applying a colormap. 
+
+        Returns
+        -------
+        scaled_image : uint8 image that has been saved
+        
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128])
+        >>> for i in xrange(im.shape[0]):
+        >>>     im[i,:] = np.linspace(0., 0.3, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_image('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if channel is None:
+            return write_image(self.swapaxes(0,1), filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+        else:
+            return write_image(self.swapaxes(0,1)[:,:,channel], filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+
+    def save(self, filename, png=True, hdf5=True):
+        """
+        Saves ImageArray. 
+
+        Arguments:
+          filename: string
+            This should not contain the extension type (.png, .h5, ...)
+
+        Optional Arguments:
+          png: boolean, default True
+            Save to a png
+
+          hdf5: boolean, default True
+            Save to hdf5 file, including info dictionary as attributes.
+
+        """
+        if png:
+            if len(self.shape) > 2:
+                self.write_png("%s.png" % filename)
+            else:
+                self.write_image("%s.png" % filename)
+        if hdf5:
+            self.write_hdf5("%s.h5" % filename)
+
+    __doc__ += np.ndarray.__doc__
+
+if __name__ == "__main__":
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0., 0.3, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
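
Because ImageArray subclasses np.ndarray (via __new__ and __array_finalize__),
the info dictionary travels with views and slices of the image.  A minimal
sketch, with a hypothetical field name and units:

    import numpy as np
    from yt.data_objects.image_array import ImageArray

    buff = np.random.random((64, 64))
    im = ImageArray(buff, info={'field': 'Density', 'units': 'g/cm**3'})
    sub = im[16:48, 16:48]         # slicing returns another ImageArray
    print sub.info['field']        # the info dict is carried along
    im.save('density_render')      # writes density_render.png and .h5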
+


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -70,18 +70,18 @@
         max_val, maxi, mx, my, mz, mg = \
             source.quantities["MaxLocation"]( field, lazy_reader=True)
         max_grid = self.grids[mg]
-        mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
+        mc = np.unravel_index(maxi, max_grid.ActiveDimensions)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
               max_val, mx, my, mz, max_grid, max_grid.Level, mc)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_grid, mc, max_val, na.array((mx,my,mz), dtype='float64')
+        return max_grid, mc, max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -90,7 +90,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -103,11 +103,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -134,7 +134,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -151,15 +151,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -167,29 +167,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -203,26 +203,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -237,5 +237,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
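
The grid-finding helpers above are normally used through the hierarchy object;
a brief sketch (hypothetical dataset path):

    from yt.mods import *
    pf = load("DD0010/DD0010")                  # hypothetical dataset
    # Both helpers return (grid objects, indices), as their docstrings state.
    grids, ind = pf.h.find_point([0.5, 0.5, 0.5])
    sphere_grids, gI = pf.h.find_sphere_grids([0.5, 0.5, 0.5], 0.05)
    print len(grids), len(sphere_grids)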
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
         


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -115,13 +115,13 @@
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = np.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -131,7 +131,7 @@
         for key in self.__data:
             my_mean[key] = self._get_empty_field()
             my_weight[key] = self._get_empty_field()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for key in self.__data:
             my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
             my_weight[key][ub] = self.__weight_data[key][ub]
@@ -151,7 +151,7 @@
                                          accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
-                q[u] = na.sqrt(q[u] / w[u])
+                q[u] = np.sqrt(q[u] / w[u])
             self[field] = f
             self["%s_std" % field] = q
         self["UsedBins"] = u
@@ -202,7 +202,7 @@
                 else:
                     pointI = self._data_source._get_point_indices(source)
             data.append(source[field][pointI].ravel().astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -235,10 +235,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -253,7 +253,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -263,7 +263,7 @@
         # (i.e., lazy_reader)
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -282,7 +282,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -293,7 +293,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -301,9 +301,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -319,7 +319,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -337,11 +337,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -409,18 +409,18 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -428,7 +428,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -436,7 +436,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -456,9 +456,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -470,9 +470,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -481,11 +481,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -507,8 +507,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -531,7 +531,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -540,7 +540,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -579,7 +579,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -599,7 +599,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -617,9 +617,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -656,24 +656,24 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -681,7 +681,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -689,9 +689,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -711,11 +711,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -727,7 +727,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -741,13 +741,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -772,9 +772,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -853,7 +853,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]
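
[Editorial aside, not part of the changeset: the slicing above reconstructs each original
grid from the refined covering grid. A minimal interactive sketch of the same covering-grid
API, assuming the yt 2.x interface used in this test (fake_random_pf from yt.testing,
pf.h.covering_grid):]

    # Sketch only: build a level-1 covering grid over the whole domain and check
    # the same invariants the test relies on (every cell covered exactly once).
    from yt.testing import fake_random_pf

    pf = fake_random_pf(16, nprocs=4)
    level = 1
    dn = pf.refine_by ** level                      # refinement factor at this level
    cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0], dn * pf.domain_dimensions)

    assert cg["Ones"].min() == 1.0 and cg["Ones"].max() == 1.0
    assert abs(cg["CellVolume"].sum() - pf.domain_width.prod()) < 1e-10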


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,74 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at profiles spanning the field extrema, over a range of
+    # bin counts and binning options.
+    for nb in [8, 16, 32, 64]:
+        for lr in [True, False]:
+            # We log all the fields or don't log 'em all.  No need to do them
+            # individually.
+            for lf in [True, False]: 
+                # We have the min and the max, but to avoid cutting them off
+                # since we aren't doing end-collect, we cut a bit off the edges
+                for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                    p1d = BinnedProfile1D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        lr, end_collect=ec)
+                    p1d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p1d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                    p2d = BinnedProfile2D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        lr, end_collect=ec)
+                    p2d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p2d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                    p3d = BinnedProfile3D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                        lr, end_collect=ec)
+                    p3d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p3d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+            p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+            p1d.add_fields("Ones", weight=None)
+            av = nv / nb
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+            # We re-bin ones with a weight now
+            p1d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+            p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False)
+            p2d.add_fields("Ones", weight=None)
+            av = nv / nb**2
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+            # We re-bin ones with a weight now
+            p2d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+            p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False,
+                                      nb, "z", 0.0, 1.0, False)
+            p3d.add_fields("Ones", weight=None)
+            av = nv / nb**3
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+            # We re-bin ones with a weight now
+            p3d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+
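
[Usage note, not part of the changeset: the BinnedProfile1D signature the test drives
positionally can be used directly on any data container. A minimal sketch under the same
yt 2.x API, using the standard "CellMass" field as the weight:]

    # Sketch only: a mass-weighted temperature profile in 32 log-spaced Density bins.
    from yt.testing import fake_random_pf
    from yt.data_objects.profiles import BinnedProfile1D

    pf = fake_random_pf(64, nprocs=8, fields=("Density", "Temperature"))
    dd = pf.h.all_data()
    dmin, dmax = dd.quantities["Extrema"](["Density"])[0]
    # Positional arguments: data source, n_bins, bin field, lower/upper bound,
    # log-spaced bins, lazy reader; end_collect keeps out-of-range values.
    prof = BinnedProfile1D(dd, 32, "Density", dmin, dmax, True, True,
                           end_collect=True)
    prof.add_fields(["Temperature"], weight="CellMass")
    t_of_rho = prof["Temperature"]           # one value per Density bin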


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,39 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+            # wf == None
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10
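
[The final comparison rests on the identity that an unweighted Density projection integrates
rho along the line of sight, so its image sum must equal sum(Density * dz) over the data.
Written out for a single axis, as a sketch under the same yt 2.x API (not part of the diff):]

    # Sketch only: the conservation check behind v1 vs. v2 above, for the z axis.
    from yt.testing import fake_random_pf, assert_rel_equal

    pf = fake_random_pf(64, nprocs=1)
    dd = pf.h.all_data()
    proj = pf.h.proj(2, "Density")            # unweighted projection along z
    v1 = proj["Density"].sum()                # integral of rho*dl over the image
    v2 = (dd["Density"] * dd["dz"]).sum()     # same integral, cell by cell
    assert_rel_equal(v1, v2, 10)              # ratio agrees to 10 decimal places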


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,7 +26,7 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
@@ -61,66 +61,66 @@
 
 def _dx(field, data):
     return data.dds[0]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
     return data.dds[1]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
     return data.dds[2]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           validators=[ValidateSpatial(0)],
           projection_conversion="unitary",
@@ -130,7 +130,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -139,7 +139,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -157,7 +157,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -169,7 +169,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -181,7 +181,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
              (data["y-velocity"]-bulk_velocity[1])**2.0 + \
              (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -189,13 +189,13 @@
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
 def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
+    return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
 add_field("TangentialVelocity", 
           function=_TangentialVelocity,
@@ -217,50 +217,181 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+      
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The spherical coordinates radius is simply the magnitude of the
+    ## coords vector.
+
+    return np.sqrt(np.sum(coords**2,axis=-1))
+
+def _Convert_sph_r_CGS(data):
+   return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The angle (theta) with respect to the normal (J), is the arccos
+    ## of the dot product of the normal with the normalized coords
+    ## vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=-1)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+    
+    ## We have freedom with respect to what axis (xprime) to define
+    ## the disk angle. Here I've chosen to use the axis that is
+    ## perpendicular to the normal and the y-axis. When normal ==
+    ## y-hat, then set xprime = z-hat. With this definition, when
+    ## normal == z-hat (as is typical), then xprime == x-hat.
+    ##
+    ## The angle is then given by the arctan of the ratio of the
+    ## yprime-component and the xprime-component of the coords vector.
+
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+    
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
+    
+    return np.arctan2(Py,Px)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+      
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The cross product of the normal (J) with the coords vector
+    ## gives a vector of magnitude equal to the cylindrical radius.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+
+def _Convert_cyl_R_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The dot product of the normal (J) with the coords vector gives
+    ## the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    return np.sum(J*coords,axis=-1)  
+
+def _Convert_cyl_z_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+### [This is identical to the spherical coordinate's 'phi' angle.]
+def _cyl_theta(field, data):
+    return data['sph_phi']
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / na.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return na.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/na.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return na.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
 
 def _DynamicalTime(field, data):
     """
@@ -268,7 +399,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -371,7 +502,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -389,7 +520,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -448,15 +579,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Now some fancy footwork
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -484,7 +615,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
@@ -496,12 +627,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -511,7 +642,7 @@
 def obtain_velocities(data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["x-velocity"] - bv[0]
     yv = data["y-velocity"] - bv[1]
     zv = data["z-velocity"] - bv[2]
@@ -563,18 +694,18 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
@@ -589,7 +720,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -599,7 +730,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -609,7 +740,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -657,20 +788,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
@@ -755,16 +886,16 @@
     center = data.get_field_parameter("center")
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
                 + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
                 + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
                 )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
+    if np.any(np.isnan(new_field)): # to fix center = point
+        new_field[np.isnan(new_field)] = 0.0
     return new_field
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
     return 1e-5
 add_field("RadialVelocity", function=_RadialVelocity,
@@ -785,10 +916,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -798,15 +929,34 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -824,32 +974,57 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    BMagnitude field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    PlasmaBeta field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Energy}",
+          units="\rm{ergs}\/\rm{cm}^{-3}")
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
@@ -862,7 +1037,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -887,7 +1062,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -907,7 +1082,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -922,7 +1097,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -937,7 +1112,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -952,7 +1127,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -969,7 +1144,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -984,7 +1159,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -999,7 +1174,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1014,7 +1189,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1040,7 +1215,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1058,7 +1233,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1076,7 +1251,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1094,7 +1269,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1113,7 +1288,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1132,7 +1307,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1154,13 +1329,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1168,7 +1343,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1180,7 +1355,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1213,7 +1388,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1238,13 +1413,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1252,7 +1427,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1265,7 +1440,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


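The sign convention used by _VorticityGrowthMagnitude above (negative when the growth vector opposes the existing vorticity) can be checked outside of yt with plain NumPy arrays. This is only a sketch with made-up vectors, not any field defined in this changeset:

    import numpy as np

    # Hypothetical vorticity and vorticity-growth vectors for two cells.
    omega  = np.array([[0.0, 0.0,  1.0],
                       [0.0, 0.0,  1.0]])
    domega = np.array([[0.0, 0.0,  0.5],    # aligned with omega  -> positive
                       [0.0, 0.0, -0.5]])   # opposed to omega    -> negative

    magnitude = np.sqrt(np.sum(domega**2, axis=1))
    dot = np.sum(omega * domega, axis=1)
    signed = np.sign(dot) * magnitude
    print(signed)    # [ 0.5 -0.5]
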
diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+
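As a concrete illustration of what _parse_hierarchy is expected to fill in, here is a minimal sketch for a hypothetical dataset with a single 64^3 root grid covering the unit cube; the array names and shapes follow the comments above, but the values are invented:

    import numpy as np

    num_grids = 1
    grid_left_edge      = np.zeros((num_grids, 3), dtype='float64')
    grid_right_edge     = np.ones((num_grids, 3), dtype='float64')
    grid_dimensions     = np.ones((num_grids, 3), dtype='int32') * 64
    grid_particle_count = np.zeros((num_grids, 1), dtype='int32')
    grid_levels         = np.zeros((num_grids, 1), dtype='int32')
    # In the real method these would be assigned to self.grid_left_edge, etc.,
    # and self.grids would hold one SkeletonGrid instance per row.
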




diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field:
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")
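The double registration above (the on-disk name via add_skeleton_field, plus a yt-level alias via TranslationFunc) is easiest to see with a stripped-down stand-in for the field containers. The dictionaries below only sketch the lookup behaviour and are not the actual FieldInfoContainer machinery:

    # Hypothetical miniature version of the on-disk -> yt name mapping.
    known_fields = {}        # stands in for KnownSkeletonFields
    derived_fields = {}      # stands in for SkeletonFieldInfo

    def add_known(name):
        known_fields[name] = lambda data: data[name]

    def add_translated(yt_name, disk_name):
        derived_fields[yt_name] = lambda data: known_fields[disk_name](data)

    add_known("dens")
    add_translated("Density", "dens")

    data = {"dens": 1.0e-24}                   # made-up value, g/cm^3
    print(derived_fields["Density"](data))     # reads the on-disk "dens" array
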


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass
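For a frontend whose data live in HDF5, _read_data_set usually amounts to opening the file and pulling out one dataset per (grid, field) pair. The sketch below assumes a made-up layout of /grid_0000/Density style groups, which is nothing this skeleton prescribes; it only shows the shape of such a reader:

    import h5py
    import numpy as np

    def read_data_set(filename, grid_id, field):
        # Assumed (hypothetical) layout: one group per grid, one dataset per field.
        with h5py.File(filename, "r") as f:
            return f["/grid_%04i/%s" % (grid_id, field)][:]

    # Round-trip demonstration with a throwaway file.
    with h5py.File("skeleton_demo.h5", "w") as f:
        grp = f.create_group("grid_0000")
        grp.create_dataset("Density", data=np.ones((4, 4, 4)))
    print(read_data_set("skeleton_demo.h5", 0, "Density").shape)   # (4, 4, 4)
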




diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -25,8 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -133,7 +132,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
@@ -148,10 +147,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -169,7 +168,7 @@
         self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         if 'particle_position' in dir(self.pf):
             self._setup_particle_grids()
@@ -300,9 +299,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -312,10 +311,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -351,8 +350,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
@@ -361,7 +360,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -387,15 +386,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -425,8 +424,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -470,14 +469,14 @@
                 self.grid_dimensions[gi,:] = gd
                 assert na.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
                     start_index,le,re,gd))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -497,7 +496,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -518,16 +517,15 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity  = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity  = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
 
-            
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_mass_initial'] = 1.0
@@ -635,12 +633,11 @@
         for gi,g in enumerate(grids):    
             self.grids[gi]=g
                     
-
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -672,7 +669,7 @@
             g._prepare_grid()
             g._setup_dx()
         pb.finish()
-        #self.max_level = self.grid_levels.max()
+        self.max_level = self.grid_levels.max()
 
     def _setup_field_list(self):
         if self.parameter_file.use_particles:
@@ -737,7 +734,7 @@
         self.spread = spread
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -813,7 +810,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -824,7 +821,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -861,8 +858,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -943,10 +940,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -955,7 +952,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -967,7 +964,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -988,10 +985,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -1059,10 +1056,10 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspeciesf'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspeciesf'] = na.fromfile(fh,dtype='>i',count=10)
-        assert na.all(self.parameters['lspeciesf'][n:]==0.0)
-        assert na.all(self.parameters['wspeciesf'][n:]==0.0)
+        self.parameters['wspeciesf'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspeciesf'] = np.fromfile(fh,dtype='>i',count=10)
+        assert np.all(self.parameters['lspeciesf'][n:]==0.0)
+        assert np.all(self.parameters['wspeciesf'][n:]==0.0)
         self.parameters['wspecies'] = self.parameters['wspeciesf'][:n]
         self.parameters['lspecies'] = self.parameters['lspeciesf'][:n]
         fh.close()
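To make the unit setup above concrete, the r0/v0/t0 chain can be evaluated with hypothetical header values (these numbers are not from any real ART snapshot):

    import numpy as np

    # Made-up example values standing in for header_vals / self.parameters.
    boxh, ng = 20.0, 128          # comoving box size and root-grid cells
    Om0, hubble = 0.27, 0.70

    r0 = boxh / ng                                   # 0.15625, as in _setup_ above
    v0_cms = r0 * 50.0 * 1.0e5 * np.sqrt(Om0)        # cm/s form used in _setup_
    v0_kms = 50.0 * r0 * np.sqrt(Om0)                # km/s form used in _parse_parameter_file
    dummy = 100.0 * hubble * np.sqrt(Om0)
    t0 = 2.0 / dummy * 3.0856e19 / 3.15e7            # Mpc->km and s->yr conversion factors
    rho0 = 2.776e11 * hubble**2 * Om0                # Msun/Mpc^3

    print(r0, v0_kms, t0, rho0)
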


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -42,7 +42,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -179,7 +179,7 @@
     #if data.id==460:
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     #x = data.pf.conversion_factors["Temperature"]
@@ -252,8 +252,8 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     if na.sum(idx)>0:
-        tr /= na.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contaiend mass
         print tr.shape
         return tr
     else:


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -189,10 +189,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -219,9 +219,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -263,20 +263,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -293,12 +293,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -320,7 +320,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -334,9 +334,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file):
@@ -358,8 +358,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -368,9 +368,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -380,12 +380,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -398,8 +398,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
@@ -432,14 +432,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -449,9 +449,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -476,14 +476,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): 
@@ -491,14 +491,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
 




diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/api.py
--- /dev/null
+++ b/yt/frontends/athena/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.athena
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      AthenaGrid, \
+      AthenaHierarchy, \
+      AthenaStaticOutput
+
+from .fields import \
+      AthenaFieldInfo, \
+      KnownAthenaFields, \
+      add_athena_field
+
+from .io import \
+      IOHandlerAthena


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena/data_structures.py
@@ -0,0 +1,356 @@
+"""
+Data structures for Athena.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as np
+import weakref
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.data_objects.hierarchy import \
+           AMRHierarchy
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import AthenaFieldInfo, KnownAthenaFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class AthenaGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        df = hierarchy.storage_filename
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
+        else:
+            if id == 0:
+                gname = 'id0/%s.vtk' % df
+            else:
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
+        AMRGridPatch.__init__(self, id, filename = gname,
+                              hierarchy = hierarchy)
+        self.filename = gname
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+def parse_line(line, grid):
+    # grid is a dictionary
+    splitup = line.strip().split()
+    if "vtk" in splitup:
+        grid['vtk_version'] = splitup[-1]
+    elif "Really" in splitup:
+        grid['time'] = splitup[-1]
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
+        grid['time'] = float(splitup[4].rstrip(','))
+        grid['level'] = int(splitup[6].rstrip(','))
+        grid['domain'] = int(splitup[8].rstrip(','))
+    elif "DIMENSIONS" in splitup:
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
+    elif "ORIGIN" in splitup:
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
+    elif "SPACING" in splitup:
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
+    elif "CELL_DATA" in splitup:
+        grid["ncells"] = int(splitup[-1])
+    elif "SCALARS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif "VECTORS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+
+class AthenaHierarchy(AMRHierarchy):
+
+    grid = AthenaGrid
+    _data_style='athena'
+    
+    def __init__(self, pf, data_style='athena'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.storage_filename = self.parameter_file.storage_filename
+        self.hierarchy_filename = self.parameter_file.filename
+        #self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = file(self.hierarchy_filename,'rb')
+        AMRHierarchy.__init__(self, pf, data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        field_map = {}
+        f = open(self.hierarchy_filename,'rb')
+        line = f.readline()
+        while line != '':
+            splitup = line.strip().split()
+            if "DIMENSIONS" in splitup:
+                grid_dims = np.array(splitup[-3:]).astype('int')
+                line = f.readline()
+            elif "CELL_DATA" in splitup:
+                grid_ncells = int(splitup[-1])
+                line = f.readline()
+                if np.prod(grid_dims) != grid_ncells:
+                    grid_dims -= 1
+                    grid_dims[grid_dims==0]=1
+                if np.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
+                          (np.prod(grid_dims), grid_ncells))
+                    raise TypeError
+                break
+            else:
+                line = f.readline()
+        read_table = False
+        read_table_offset = f.tell()
+        while line != '':
+            splitup = line.strip().split()
+            if 'SCALARS' in splitup:
+                field = splitup[1]
+                if not read_table:
+                    line = f.readline() # Read the lookup table line
+                    read_table = True
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                read_table=False
+
+            elif 'VECTORS' in splitup:
+                field = splitup[1]
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
+            line = f.readline()
+
+        f.close()
+
+        self.field_list = field_map.keys()
+        self._field_map = field_map
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = self.parameter_file.nvtk
+
+    def _parse_hierarchy(self):
+        f = open(self.hierarchy_filename,'rb')
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = f.readline()
+        f.close()
+
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            grid['dimensions'] -= 1
+            grid['dimensions'][grid['dimensions']==0]=1
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                  (np.prod(grid['dimensions']), grid['ncells']))
+            raise TypeError
+
+        dxs=[]
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
+        single_grid_width = grid['dds']*grid['dimensions']
+        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
+        glis = np.empty((self.num_grids,3), dtype='int64')
+        for i in range(self.num_grids):
+            procz = i/(grids_per_dim[0]*grids_per_dim[1])
+            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
+            glis[i, 0] = procx*grid['dimensions'][0]
+            glis[i, 1] = procy*grid['dimensions'][1]
+            glis[i, 2] = procz*grid['dimensions'][2]
+        gdims = np.ones_like(glis)
+        gdims[:] = grid['dimensions']
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dxs.append(grid['dds'])
+        dx = np.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+class AthenaStaticOutput(StaticOutput):
+    _hierarchy_class = AthenaHierarchy
+    _fieldinfo_fallback = AthenaFieldInfo
+    _fieldinfo_known = KnownAthenaFields
+    _data_style = "athena"
+
+    def __init__(self, filename, data_style='athena',
+                 storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
+        StaticOutput.__init__(self, filename, data_style)
+        self.filename = filename
+        self.storage_filename = filename[4:-4]
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _parse_parameter_file(self):
+        self._handle = open(self.parameter_filename, "rb")
+        # Read the start of a grid to get simulation parameters.
+        grid = {}
+        grid['read_field'] = None
+        line = self._handle.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = self._handle.readline()
+
+        self.domain_left_edge = grid['left_edge']
+        if 'domain_right_edge' in self.specified_parameters:
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
+        else:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self.domain_width/grid['dds']
+        refine_by = None
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by
+        self.dimensionality = 3
+        self.current_time = grid["time"]
+        self.unique_identifier = self._handle.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'vtk' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+
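The index arithmetic in _parse_hierarchy that turns a flat vtk file index into an (x, y, z) block position can be checked in isolation; the grid counts below are made up rather than read from any dataset:

    import numpy as np

    grids_per_dim = np.array([2, 2, 2])      # hypothetical id0..id7 decomposition
    dims = np.array([32, 32, 32])            # cells per grid along each axis

    for i in range(np.prod(grids_per_dim)):
        procz = i // (grids_per_dim[0] * grids_per_dim[1])
        procy = (i - procz * grids_per_dim[0] * grids_per_dim[1]) // grids_per_dim[0]
        procx = i - procz * grids_per_dim[0] * grids_per_dim[1] - procy * grids_per_dim[0]
        left_index = np.array([procx, procy, procz]) * dims
        print(i, left_index)
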


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/definitions.py
--- /dev/null
+++ b/yt/frontends/athena/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/fields.py
--- /dev/null
+++ b/yt/frontends/athena/fields.py
@@ -0,0 +1,88 @@
+"""
+Athena-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {}
+
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x",
+                    "mag_field_y": "cell_centered_B_y",
+                    "mag_field_z": "cell_centered_B_z"}
+
+AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = AthenaFieldInfo.add_field
+
+KnownAthenaFields = FieldInfoContainer()
+add_athena_field = KnownAthenaFields.add_field
+
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units=r"")
+
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+
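
The translation dictionaries above map yt's universal field names onto Athena's on-disk names. Conceptually, TranslationFunc builds a derived field that just reads the on-disk field; a rough sketch of the idea (not the exact yt implementation):

    # Requesting "Density" transparently fetches the on-disk "density" array.
    def translation_func(on_disk_name):
        def _translated(field, data):
            return data[on_disk_name]
        return _translated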


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/io.py
--- /dev/null
+++ b/yt/frontends/athena/io.py
@@ -0,0 +1,107 @@
+"""
+The data-file handling functions
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.utilities.io_handler import \
+           BaseIOHandler
+import numpy as np
+
+class IOHandlerAthena(BaseIOHandler):
+    _data_style = "athena"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+    _read_table_offset = None
+
+    def _field_dict(self,fhandle):
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        return dict(zip(keys,val))
+
+    def _read_field_names(self,grid):
+        pass
+
+    def _read_data_set(self,grid,field):
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+        grid_dims = grid.ActiveDimensions
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid_dims,order='F').copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid_dims,order='F').copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid_dims,order='F').copy()
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
+
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
+
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+    return read_table_offset
+
+
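
A short illustration of the stride-3 slicing used in the vector branches above, assuming nothing beyond numpy: legacy VTK VECTORS are stored interleaved as (x0, y0, z0, x1, y1, z1, ...), so each component is recovered with a strided slice before the Fortran-order reshape.

    import numpy as np

    grid_dims = (2, 2, 2)
    flat = np.arange(3 * 8, dtype='>f4')          # stand-in for np.fromfile(...)
    vx = flat[0::3].reshape(grid_dims, order='F')
    vy = flat[1::3].reshape(grid_dims, order='F')
    vz = flat[2::3].reshape(grid_dims, order='F')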




diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/athena/setup.py
--- /dev/null
+++ b/yt/frontends/athena/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('athena', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz have already been read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -174,12 +167,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +180,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +266,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +289,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +304,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +340,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +360,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +377,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +398,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +417,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +432,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +466,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +613,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,25 +81,16 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dds has already been read in and stored in the hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -137,18 +128,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py                                                                                                             
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
@@ -176,14 +167,16 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,9 +186,9 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = na.array(self.grids, dtype='object')
+#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -212,7 +205,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -316,21 +309,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +
@@ -131,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
@@ -90,7 +90,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +179,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(AMRHierarchy):
@@ -291,7 +291,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
@@ -304,29 +304,29 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        self._fill_arrays(ei, si, LE, RE, npart)
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
 
     def __pointer_handler(self, m):
         sgi = int(m[2])-1
@@ -379,7 +379,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -408,9 +408,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -425,7 +425,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -441,11 +441,13 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if reconstruct:
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
 
@@ -475,20 +477,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -516,7 +518,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -525,7 +527,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -588,7 +590,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -599,7 +601,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -636,35 +638,35 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,1:] = 0.0
         self.grid_right_edge[:,1:] = 1.0
         self.grid_dimensions[:,1:] = 1
 
 class EnzoHierarchy2D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,2] = 0.0
         self.grid_right_edge[:,2] = 1.0
         self.grid_dimensions[:,2] = 1
@@ -700,39 +702,22 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            np.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            np.concatenate([self.domain_right_edge, [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -825,7 +810,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
@@ -840,17 +825,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -868,6 +853,11 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -935,7 +925,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -947,8 +937,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -988,7 +978,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -996,7 +986,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():
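
The _populate_grid_objects change above makes parent-based grid property reconstruction opt-in through the "reconstruct_hierarchy" configuration option. A minimal sketch of enabling it, assuming yt's usual configuration mechanisms (~/.yt/config, or setting ytcfg before any data is loaded):

    # In ~/.yt/config:
    #   [yt]
    #   reconstruct_hierarchy = True

    # Or at the top of a script (assumed item-assignment idiom for ytcfg):
    from yt.config import ytcfg
    ytcfg["yt", "reconstruct_hierarchy"] = "True"

    from yt.mods import load
    pf = load("DD0010/moving7_0010")   # hypothetical Enzo output
    pf.h                               # grids now call _guess_properties_from_parent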


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -171,23 +171,29 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = r"$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = r"$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = r"$\rm{Total}\/\rm{Energy}$",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+
+def _TotalEnergy(field, data):
+    return data["Total_Energy"] / _convertEnergy(data)
+add_field("TotalEnergy", function=_TotalEnergy,
+          display_name = r"$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -243,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -318,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
@@ -361,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -400,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -460,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -479,9 +485,9 @@
 def _Bmag(field, data):
     """ magnitude of bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 
@@ -489,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -549,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -578,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -600,11 +606,10 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #
@@ -632,7 +637,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
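
The time-based selection above leans on np.digitize to bracket the requested
initial and final times, and on sorting by absolute distance (with an optional
tolerance) to find the output nearest a requested value.  A small
self-contained sketch of both lookups, with an invented list of outputs:

import numpy as np

outputs = [{'filename': 'DD%04d' % i, 'time': t}
           for i, t in enumerate([0.0, 1.0, 2.5, 4.0, 8.0])]

# Bracket [initial_time, final_time] the way the hunk above does.
my_times = np.array([o['time'] for o in outputs])
initial_time, final_time = 1.0, 4.0
indices = np.digitize([initial_time, final_time], my_times)
if initial_time == my_times[indices[0] - 1]:
    indices[0] -= 1
selected = outputs[indices[0]:indices[1]]      # DD0001 .. DD0003

# Nearest output to a requested value, as in _get_outputs_by_key above.
def nearest_output(value, outputs, key='time', tolerance=None):
    outputs = sorted(outputs, key=lambda o: np.abs(value - o[key]))
    if tolerance is None or np.abs(value - outputs[0][key]) <= tolerance:
        return outputs[0]
    return None

nearest_output(2.4, outputs)                   # the t = 2.5 output
nearest_output(2.4, outputs, tolerance=0.05)   # None: outside tolerance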
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.funcs import *
@@ -39,10 +39,10 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -70,7 +70,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -123,36 +123,39 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
@@ -184,11 +187,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
@@ -203,6 +211,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
@@ -229,13 +238,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -252,10 +261,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
@@ -265,7 +274,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -363,9 +373,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -391,7 +401,7 @@
         nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:
@@ -407,6 +417,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
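
The cm_per_mpc * 1.0e-5 * 1.0e-2 factor above appears to take a Hubble
constant stored in CGS (1/s), re-express it in km/s/Mpc, and then divide by
100 to get the dimensionless little-h the cosmology machinery expects.  A
quick numerical check (the H0 value is just an example):

cm_per_mpc = 3.0857e24           # centimeters per megaparsec (approximate)

H0_cgs = 2.27e-18                # an example Hubble constant in 1/s
H0_kms_per_mpc = H0_cgs * cm_per_mpc * 1.0e-5   # ~70 km/s/Mpc
little_h = H0_kms_per_mpc * 1.0e-2              # ~0.70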


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -97,7 +98,10 @@
     if fn1.endswith("_Fraction"):
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
-                  display_name="%s\/Density" % fn1.split("_")[0])
+                  display_name="%s\/Density" % fn1.split("_")[0],
+                  units = r"\rm{g}/\rm{cm}^3",
+                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  )
 
 def _get_convert(fname):
     def _conv(data):
@@ -106,7 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^3",
+                projected_units = r"\rm{g}/\rm{cm}^2")
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -203,6 +208,7 @@
     add_field(f, TranslationFunc(v),
               take_log=KnownFLASHFields[v].take_log,
               units = ff._units, display_name=dname,
+              projected_units = ff._projected_units,
               particle_type = pfield)
 
 def _convertParticleMassMsun(data):
@@ -254,3 +260,43 @@
 
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
+
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
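
Per the flash-users thread linked above, the stored magnetic field components
need a unit-system-dependent factor to come out in Gauss.  An equivalent,
table-driven sketch of the same lookup (unit-system strings as in the hunk
above):

import numpy as np

MAG_RESCALE = {
    "cgs":  1.0,
    "si":   np.sqrt(4.0 * np.pi / 1.0e7),
    "none": np.sqrt(4.0 * np.pi),
}

def mag_rescaling_factor(unitsystem):
    try:
        return MAG_RESCALE[unitsystem.lower()]
    except KeyError:
        raise RuntimeError("Runtime parameter unitsystem with value "
                           "%s is unrecognized" % unitsystem)

# e.g. a dimensionless ("none") run: B in Gauss is magx * sqrt(4*pi)
mag_rescaling_factor("none")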


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -51,27 +51,10 @@
             count_list, conv_factors):
         pass
 
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return na.concatenate(tr)
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
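
Dropping _select_particles works because the FLASH hierarchy (see the
np.add.accumulate hunk in data_structures.py above) now precomputes
_particle_indices, so each grid's tracer particles form one contiguous slice
of the particle table.  A sketch of how those offsets are built and used
(counts invented; the real code also subtracts grid._id_offset from grid.id):

import numpy as np

# per-grid particle counts, e.g. from f["/localnp"]
grid_particle_count = np.array([3, 0, 5, 2], dtype='int64')

particle_indices = np.zeros(grid_particle_count.size + 1, dtype='int64')
np.add.accumulate(grid_particle_count, out=particle_indices[1:])
# particle_indices is now [0, 3, 3, 8, 10]

# particles belonging to grid i are rows start:end of the flat particle array
i = 2
start, end = particle_indices[i], particle_indices[i + 1]   # 3, 8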




diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(len(args), dtype='object')
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
         


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -37,6 +37,8 @@
            AMRHierarchy
 from yt.data_objects.static_output import \
            StaticOutput
+from yt.utilities.lib import \
+    get_box_grids_level
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +110,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +127,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -133,21 +135,32 @@
         del levels, glis, gdims
 
     def _populate_grid_objects(self):
-        for g in self.grids:
+        mask = np.empty(self.grids.size, dtype='int32')
+        for gi, g in enumerate(self.grids):
             g._prepare_grid()
             g._setup_dx()
 
-        for g in self.grids:
+        for gi, g in enumerate(self.grids):
             g.Children = self._get_grid_children(g)
             for g1 in g.Children:
                 g1.Parent.append(g)
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                self.grid_levels[gi],
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            m = mask.astype("bool")
+            m[gi] = False
+            siblings = self.grids[gi:][m[gi:]]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
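
The sibling detection added above asks get_box_grids_level for every grid
whose box touches the current one at the same level, masks out the grid
itself, and keeps only later-indexed grids so each overlapping pair is stored
once.  A pure-NumPy stand-in for that box-overlap test (the grid edges and
levels here are invented; the real code calls the Cython helper):

import numpy as np

def overlapping_siblings(gi, left_edges, right_edges, levels):
    # grids at the same level whose boxes overlap grid gi, with index > gi
    overlap = np.all((left_edges < right_edges[gi]) &
                     (right_edges > left_edges[gi]), axis=1)
    mask = overlap & (levels == levels[gi])
    mask[:gi + 1] = False
    return np.where(mask)[0]

left_edges = np.array([[0.0, 0.0, 0.0], [0.4, 0.0, 0.0], [0.5, 0.5, 0.5]])
right_edges = left_edges + 0.5
levels = np.array([1, 1, 1])
overlapping_siblings(0, left_edges, right_edges, levels)   # array([1])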


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -170,9 +164,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +175,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +253,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +298,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +319,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +348,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +361,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +375,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +488,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0
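
The _setup_dx rewrite here (and the matching ones in the nyx and orion
frontends below) stops recomputing cell widths per grid and simply looks them
up in the per-level dx table the header parser already filled in.  A small
sketch of how the grid_dxs / grid_dys / grid_dzs columns come out of that
table (level populations invented):

import numpy as np

# per-level cell widths, as parsed from the plotfile header
dx = np.array([[0.10, 0.10, 0.10],      # level 0
               [0.05, 0.05, 0.05]])     # level 1

ngrids_per_level = [2, 3]               # invented grid counts per level
grid_dcs = np.concatenate([n * [dx[lvl]]
                           for lvl, n in enumerate(ngrids_per_level)], axis=0)

num_grids = grid_dcs.shape[0]
grid_dxs = grid_dcs[:, 0].reshape((num_grids, 1))
grid_dys = grid_dcs[:, 1].reshape((num_grids, 1))
grid_dzs = grid_dcs[:, 2].reshape((num_grids, 1))

# a grid's dds is then a lookup by its hierarchy index
index = 3
dds = np.array([grid_dxs[index][0], grid_dys[index][0], grid_dzs[index][0]])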


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -172,20 +165,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +262,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +283,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +297,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +334,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +354,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +371,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +391,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +408,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +437,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +457,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +600,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
@@ -139,7 +133,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +154,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +205,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +296,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +341,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +362,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +393,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +407,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +545,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file
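
For reference, the FAB header parsing above derives each grid's shape from
inclusive start/stop index triples.  A minimal standalone sketch of the same
arithmetic (the "0,0,0" / "63,63,63" strings are illustrative, not taken from
a real header):

    import numpy as np

    start_str, stop_str = "0,0,0", "63,63,63"          # hypothetical index triples
    start = np.array(map(int, start_str.split(',')))   # array([ 0,  0,  0])
    stop  = np.array(map(int, stop_str.split(',')))    # array([63, 63, 63])

    # Indices are inclusive, so the cell count along each axis is stop - start + 1.
    gridSize  = stop - start + 1                        # array([64, 64, 64])
    nElements = gridSize.prod()                         # 262144 values in this FAB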


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 
@@ -79,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -93,10 +93,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -116,7 +116,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.tree_proxy = pf.ramses_tree
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -153,12 +153,12 @@
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         level_info = self.tree_proxy.count_zones()
         num_ogrids = sum(level_info)
-        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        ogrid_hilbert_indices = na.zeros(num_ogrids, dtype='uint64')
-        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        ogrid_left_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = np.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = np.zeros((num_ogrids,6), dtype='int64')
+        ogrid_hilbert_indices = np.zeros(num_ogrids, dtype='uint64')
+        ochild_masks = np.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
             self.pf.domain_dimensions,
             ogrid_left_edge, ogrid_right_edge,
@@ -180,7 +180,7 @@
             if level_info[level] == 0: continue
             # Get the indices of grids on this level
             ggi = (ogrid_levels == level).ravel()
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2 
+            dims = np.ones((ggi.sum(), 3), dtype='int64') * 2 
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             fl = ogrid_file_locations[ggi,:]
@@ -189,7 +189,7 @@
             # We want grids that cover no more than MAX_EDGE cells in every direction
             psgs = []
             # left_index is integers of the index, with respect to this level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
+            left_index = np.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
@@ -203,18 +203,18 @@
             #print level, hilbert_indices.min(), hilbert_indices.max()
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
             for ddleft_index, ddfl in zip(lefts, locs):
-                for idomain in na.unique(ddfl[:,0]):
+                for idomain in np.unique(ddfl[:,0]):
                     dom_ind = ddfl[:,0] == idomain
                     dleft_index = ddleft_index[dom_ind,:]
                     dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                     psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                     dleft_index, dfl)
                     if psg.efficiency <= 0: continue
@@ -226,12 +226,12 @@
             pbar.finish()
             self.proto_grids.append(psgs)
             print sum(len(psg.grid_file_locations) for psg in psgs)
-            sums = na.zeros(3, dtype='int64')
+            sums = np.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             #for g in self.proto_grids[level]:
             #    sums += [s.sum() for s in g.sigs]
-            #assert(na.all(sums == dims.prod(axis=1).sum()))
+            #assert(np.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     def _parse_hierarchy(self):
@@ -251,11 +251,11 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.proto_grids = []
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[gi,:],
@@ -346,10 +346,10 @@
         rheader = self.ramses_tree.get_file_info()
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_right_edge = na.ones(3, dtype='float64') \
+        self.domain_right_edge = np.ones(3, dtype='float64') \
                                            * rheader['boxlen']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * 2
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,8 +38,8 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
 
     def _read_data_set(self, grid, field):
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('frontends', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("athena")
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -180,7 +181,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -191,7 +192,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -199,7 +200,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -208,7 +209,7 @@
 
     def _initialize_grid_arrays(self):
         AMRHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def save_data(self, *args, **kwargs):
         pass
@@ -224,7 +225,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -296,8 +297,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +314,67 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    nprocs : integer, optional
+        If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = np.array(domain_dimensions)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(np.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +388,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
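
For reference, a minimal usage sketch of the reworked load_uniform_grid
signature, based on the docstring and the decomposition branch above (the
field name, array shape, and bbox values are illustrative):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    # Any dict of identically shaped ndarrays keyed by field name will do.
    data = dict(Density = np.random.random((64, 64, 64)))

    # Bounding box in simulation units; sim_unit_to_cm converts those units to cm.
    bbox = np.array([[0.0, 1.0], [-0.5, 0.5], [0.0, 2.0]])

    # nprocs > 1 routes through get_psize/decompose_array and yields that many
    # grids; nprocs = 1 keeps the old single-grid behavior.
    pf = load_uniform_grid(data, (64, 64, 64), 3.08e24, bbox = bbox, nprocs = 4)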


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -310,7 +310,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       ytcfg.getboolean("yt", "ipython_notebook") or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar
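
For reference, a short sketch of how a test module might use the new
"__withintesting" option checked above to silence progress bars (the
nose-style setup hook is an assumption, not part of this changeset):

    from yt.config import ytcfg

    def setup():
        # Any value getboolean() reads as true makes get_pbar() return a
        # DummyProgressBar, keeping test output free of progress bars.
        ytcfg.set("yt", "__withintesting", "True")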


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrices
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
@@ -61,7 +62,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection
+    ParticleTrajectoryCollection, ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
@@ -95,6 +96,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.athena.api import \
+    AthenaStaticOutput, AthenaFieldInfo, add_athena_field
+
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 
@@ -118,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,149 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_string_equal
+
+def assert_rel_equal(a1, a2, decimals):
+    return assert_almost_equal(a1/a2, 1.0, decimals)
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This is a sequence of length 2*ndims giving the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence (of
+        length ndims), then each dimension will be refined down to this level.
+        All values in this array must be the same or zero.  A zero valued dimension
+        indicates that this dim should not be refined.  Taking the 3D cylindrical
+        example above, if we don't want to refine theta but want r and z refined
+        to level 5, we would set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
+    return ug
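
For reference, a minimal sketch of how the new testing helpers combine (the
shapes and tolerances are illustrative):

    import numpy as np
    from yt.testing import fake_random_pf, assert_equal, assert_rel_equal

    # assert_rel_equal compares a1/a2 against 1.0 to the given number of decimals.
    assert_rel_equal(np.array([1.0, 2.0]), np.array([1.0 + 1.0e-12, 2.0]), 10)

    def test_fake_random_pf():
        # A 16^3 random Density field, split over two grids via load_uniform_grid.
        pf = fake_random_pf(16, nprocs = 2)
        dd = pf.h.all_data()
        assert_equal(dd["Density"].size, 16**3)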


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is omitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total volume of the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,15 +994,15 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
-        pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+        total_vol = np.prod(self.domain_right_edge-self.domain_left_edge)
+        pbar = get_pbar("Building kd-Tree", total_vol)
 
         while current_node is not None:
-            pbar.update(volume_partitioned)
+            pbar.update(min(volume_partitioned, total_vol))
 
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])

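The reduce_tree_images step above composites partial renderings from different MPI ranks, using ta = 1.0 - np.sum(image, axis=2) as the light transmitted through the nearer image. The per-channel update is truncated in this hunk, so the following is only a minimal standalone sketch of back-to-front compositing under that assumption, not yt's exact formula:

    import numpy as np

    def composite_over(front, back):
        # Sketch only: treat the summed RGB of `front` as its opacity and
        # let the remainder transmit `back`.  This mirrors the `ta` term in
        # reduce_tree_images but is not the exact per-channel update there.
        ta = 1.0 - np.sum(front, axis=2)
        ta[ta < 0.0] = 0.0
        out = front.copy()
        for i in range(3):
            out[:, :, i] += ta * back[:, :, i]
        return out

    front = np.random.random((8, 8, 3)) * 0.3
    back = np.random.random((8, 8, 3)) * 0.3
    print composite_over(front, back).shape    # (8, 8, 3)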

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 

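The ray test above seeds numpy and scales two unit-cube random points into the domain so the answer test is reproducible run to run; a tiny standalone sketch of the same pattern with a made-up domain:

    import numpy as np

    np.random.seed(4333)                       # fixed seed -> reproducible endpoints
    domain_left_edge = np.zeros(3)             # assumed example domain
    domain_right_edge = np.array([1.0, 1.0, 1.0])

    width = domain_right_edge - domain_left_edge
    start_point = np.random.random(3) * width + domain_left_edge
    end_point = np.random.random(3) * width + domain_left_edge
    print start_point, end_point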

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -55,10 +55,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +122,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +134,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True

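compare_array_delta above fails whenever the symmetric relative difference |a1 - a2| / (a1 + a2) exceeds the tolerance anywhere, with NaNs ignored via nanmax; a standalone sketch of that check:

    import numpy as np

    def within_tolerance(a1, a2, acceptable):
        # Sketch of the ArrayDelta test: symmetric relative difference,
        # largest finite value compared against the acceptable limit.
        if a1.shape != a2.shape:
            raise ValueError("shape mismatch")
        delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
        return np.nanmax(delta) <= acceptable

    a = np.array([1.0, 2.00, 3.0])
    b = np.array([1.0, 2.02, 3.0])
    print within_tolerance(a, b, 0.01)   # True: max relative delta ~ 0.005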

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,
@@ -1095,8 +1095,12 @@
                   )
         else:
             from IPython.config.loader import Config
+            import sys
             cfg = Config()
+            # prepend sys.path with current working directory
+            sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
+            
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
@@ -1188,6 +1192,40 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTNotebookUploadCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
+        """
+        Upload an IPython notebook to hub.yt-project.org.
+        """
+
+    name = "upload_notebook"
+    def __call__(self, args):
+        filename = args.file
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        if not filename.endswith(".ipynb"):
+            print "File must be an IPython notebook!"
+            return 1
+        import json
+        try:
+            t = json.loads(open(filename).read())['metadata']['name']
+        except (ValueError, KeyError):
+            print "File does not appear to be an IPython notebook."
+        from yt.utilities.minimal_representation import MinimalNotebook
+        mn = MinimalNotebook(filename, t)
+        rv = mn.upload()
+        print "Upload successful!"
+        print
+        print "To access your raw notebook go here:"
+        print
+        print "  %s" % (rv['url'])
+        print
+        print "To view your notebook go here:"
+        print
+        print "  %s" % (rv['url'].replace("/go/", "/nb/"))
+        print
+
 class YTPlotCmd(YTCommand):
     args = ("width", "unit", "bn", "proj", "center",
             "zlim", "axis", "field", "weight", "skip",
@@ -1212,7 +1250,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1304,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1340,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 

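The new upload_notebook command pulls the notebook's display name out of its JSON metadata before building a MinimalNotebook; a minimal sketch of that lookup (the filename below is hypothetical):

    import json

    def notebook_name(filename):
        # Mirrors the check in YTNotebookUploadCmd: .ipynb files are JSON
        # documents whose metadata block carries a 'name' entry.
        if not filename.endswith(".ipynb"):
            raise ValueError("File must be an IPython notebook!")
        with open(filename) as handle:
            return json.load(handle)['metadata']['name']

    # Hypothetical usage:
    # print notebook_name("analysis.ipynb")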

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) * 
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      asin(np.fabs(self.OmegaCurvatureNow) * 
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0..
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   

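For the flat, matter-only case the time-redshift relation above reduces to t*H0 = (2/3)(1+z)^(-3/2); a standalone numpy sketch of that single branch, with H0 in km/s/Mpc as in the class:

    import numpy as np

    kmPerMpc = 3.08567758e19   # kilometres per megaparsec

    def eds_time_from_redshift(z, hubble_constant_now=70.0):
        # Sketch of the OmegaMatterNow == 1, OmegaLambdaNow == 0 branch of
        # ComputeTimeFromRedshift, returning seconds.
        time_hubble0 = 2.0 / 3.0 / np.power(1.0 + z, 1.5)
        return time_hubble0 / (hubble_constant_now / kmPerMpc)

    print eds_time_from_redshift(0.0)   # ~2.9e17 s (~9.3 Gyr) for H0 = 70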

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,156 @@
+"""
+Automagical cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose a number into its prime factors """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    # 0.25 is a magic number
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consisting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape

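The decomposition above factorizes the number of pieces, enumerates every way of distributing each prime's power across the three axes, and scores the candidates with evaluate_domain_decomposition. A short usage sketch, assuming this new module is importable as yt.utilities.decompose:

    import numpy as np
    from yt.utilities.decompose import factorize_number, get_psize

    # Rows of (prime, power, number of ways to split that power over 3 axes)
    print factorize_number(12)                      # e.g. [[2 2 6] [3 1 3]]
    # Most cube-like px*py*pz split of a 128x128x129 array over 12 pieces
    print get_psize(np.array([128, 128, 129]), 12)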

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -76,6 +76,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = display_unit
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)
@@ -110,3 +134,15 @@
         return "You have not declared yourself to be inside the IPython " + \
                "Notebook.  Do so with this command:\n\n" + \
                "ytcfg['yt','ipython_notebook'] = 'True'"
+
+class YTUnitNotRecognized(YTException):
+    def __init__(self, unit):
+        self.unit = unit
+
+    def __str__(self):
+        return "This parameter file doesn't recognize %s" % self.unit
+
+class YTHubRegisterError(YTException):
+    def __str__(self):
+        return "You must create an API key before uploading.  See " + \
+               "https://data.yt-project.org/getting_started.html"


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, pf, grid):
+        rho = grid["Density"] / (pf.refine_by**grid.Level)
+        return (rho > self.over_density)

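The __metaclass__ above auto-registers any FlaggingMethod subclass that defines _type_name, and flag_cells ORs the boolean masks of all methods together. A minimal sketch of plugging in a custom method, assuming the module is importable; the grid here is a stand-in object, not a real yt grid:

    import numpy as np
    from yt.utilities.flagging_methods import FlaggingMethod, \
        flagging_method_registry, flag_cells

    class RandomFlagger(FlaggingMethod):
        _type_name = "random_demo"         # registered under this key
        def __init__(self, fraction):
            self.fraction = fraction
        def __call__(self, grid):
            # flag roughly `fraction` of the zones at random
            return np.random.random(grid.ActiveDimensions) < self.fraction

    class FakeGrid(object):
        ActiveDimensions = np.array([4, 4, 4])

    print "random_demo" in flagging_method_registry      # True
    flagged = flag_cells(FakeGrid(), [RandomFlagger(0.1)])
    print flagged.shape, flagged.dtype                    # (4, 4, 4) bool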

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] is 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] is 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here

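The Athena converter reads each binary VTK block as big-endian float32 and reshapes it in Fortran (column-major) order; a standalone sketch of that round trip using a throwaway file name:

    import numpy as np

    dims = (4, 3, 2)
    ncells = np.prod(dims)

    # Write a small big-endian float32 block, like a SCALARS section payload.
    np.arange(ncells, dtype='>f4').tofile('fake_block.bin')

    # Read it back the way read_grid does: '>f4', then Fortran-order reshape.
    with open('fake_block.bin', 'rb') as f:
        block = np.fromfile(f, dtype='>f4', count=ncells).reshape(dims, order='F')
    print block.shape, block[1, 0, 0]    # (4, 3, 2) 1.0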

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,171 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    g.attrs["num_ghost_zones"] = 0
+    # @todo: Where is this in the yt API?
+    g.attrs["field_ordering"] = 0
+    # @todo: not yet supported by yt.
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()


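A hedged usage sketch of the new GDF writer; the dataset path below is a placeholder and load() is yt's usual entry point:

    from yt.mods import load
    from yt.utilities.grid_data_format.writer import write_to_gdf

    pf = load("DD0010/moving7_0010")       # placeholder dataset name
    write_to_gdf(pf, "moving7_0010.gdf",
                 data_author="A. Nonymous",
                 data_comment="example GDF dump",
                 particle_type_name="dark_matter")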


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]

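Widening read_and_seek's offsets from int to np.int64_t matters once file offsets pass 2 GB, where a 32-bit int wraps negative; a quick numpy illustration:

    import numpy as np

    offset = np.int64(3) * 1024 ** 3        # ~3 GB into a hypothetical file
    print offset.astype(np.int32)           # wraps to a negative 32-bit value
    print offset                            # 3221225472 fits in 64 bits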

diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):
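
The grid_traversal changes repair the star-splatting path of the volume renderer: the kd-tree result set is rewound before each sample and freed at the end, a separate index j keeps the colour channels from clobbering the star loop counter, and setup() now copies the per-grid star parameters (sigma, cutoff, coefficient) into the accumulator. Per sample, the star loop amounts to the following pure-Python sketch (illustrative names, not the Cython signatures):

    import numpy as np

    def add_star_contributions(sample_pos, star_pos, star_colors, rgba,
                               dt, star_coeff, star_sigma_num):
        # every nearby star adds a Gaussian-weighted splat to this sample's rgba
        for pos, color in zip(star_pos, star_colors):
            gexp = np.sum((pos - sample_pos)**2)
            gaussian = star_coeff * np.exp(-gexp / star_sigma_num)
            rgba[:3] += gaussian * dt * color   # separate channel index, the "j" above
        return rgba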


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,23 +35,23 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
@@ -61,28 +61,28 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
@@ -94,9 +94,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,23 +104,23 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
@@ -135,11 +135,11 @@
         xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
         ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
         zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
+        if np.any(np.isnan(self.table)):
             raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
+        if np.any(np.isnan(x) | np.isnan(y) | np.isnan(z)):
             raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
+        if np.any(np.isnan(xm) | np.isnan(ym) | np.isnan(zm)):
             raise ValueError
         my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
         my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)
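
For reference, these interpolators are used by handing them a table, its bounds, and the field name(s), then calling them with anything dict-like that maps those names to arrays. A minimal sketch with a made-up table and field name:

    import numpy as np
    import yt.utilities.linear_interpolators as lin

    table = np.linspace(0.0, 1.0, 64)**2            # hypothetical 1D lookup table on [0, 1]
    ufi = lin.UnilinearFieldInterpolator(table, (0.0, 1.0), "x", True)
    print ufi({"x": np.array([0.1, 0.5, 0.9])})     # a plain dict stands in for a data object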


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: RunTimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] !=3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = p.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unf., I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356 ,  0.        ,  0.])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unf., I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,20 +656,20 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
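
As a quick check on the periodic_dist docstring example above: each axis separation is min(0.8, 1 - 0.8) = 0.2, so the distance comes out to sqrt(3 * 0.2**2) ≈ 0.3464:

    from yt.utilities.math_utils import periodic_dist

    print periodic_dist([0.1, 0.1, 0.1], [0.9, 0.9, 0.9], 1.0)   # ~0.3464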
     


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,13 +23,14 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
 from .poster.encode import multipart_encode
@@ -93,14 +94,15 @@
     def upload(self):
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
+        if api_key == '': raise YTHubRegisterError
         metadata, (final_name, chunks) = self._generate_post()
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +131,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
@@ -216,3 +218,22 @@
         metadata = self._attrs
         chunks = []
         return (metadata, ("chunks", []))
+
+class MinimalNotebook(MinimalRepresentation):
+    type = "notebook"
+    _attr_list = ("title",)
+
+    def __init__(self, filename, title = None):
+        # First we read in the data
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        self.data = open(filename).read()
+        if title is None:
+            title = json.loads(self.data)['metadata']['name']
+        self.title = title
+        self.data = np.fromstring(self.data, dtype='c')
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = [ ("notebook", self.data) ]
+        return (metadata, ("chunks", chunks))


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,37 +52,39 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None; otherwise rotations about a fixed axis will break.
+            # The north_vector calculated here will still be included in self.unit_vectors.
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------
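
The key change here is that self.north_vector now stays whatever the caller supplied (or None), while the orthonormal basis in unit_vectors is rebuilt by projecting north off the normal. A small numpy sketch of that "steady north" projection, with made-up vectors:

    import numpy as np

    normal = np.array([0.3, 0.4, 0.5])
    normal /= np.sqrt(np.dot(normal, normal))
    north = np.array([0.0, 0.0, 1.0])
    north = north - np.dot(north, normal) * normal   # drop the component along normal
    north /= np.sqrt(np.dot(north, north))
    east = np.cross(north, normal)
    print np.dot(north, normal), np.dot(east, normal)   # both ~0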


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextlib import contextmanager
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass
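
WorkSplitter is only a scaffold: setup() requests a controller rank plus two work groups from the processor pool and then routes each rank into the matching run_* method, which a subclass must provide. A sketch of such a subclass; the class name, bodies, and group sizes are illustrative:

    class ExampleSplitter(WorkSplitter):
        def run_controller(self):
            pass    # hand out tasks, gather results from both groups
        def run_group1(self):
            pass    # first worker group
        def run_group2(self):
            pass    # second worker group

    # Under MPI with 13 ranks: 1 controller + 4 in group1 + 8 in group2.
    # ExampleSplitter.setup(4, 8)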


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+from .parallel_analysis_interface import ProcessorPool
+from yt.utilities.io_handler import BaseIOHandler
+from contextlib import contextmanager
+import time
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                self.queue[g.id] = {}
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return np.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except:# self.pf.hierarchy.io._read_exception as exc:
+            if fi.not_in_all:
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while 1:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = np.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = np.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = string(len(workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -423,14 +421,14 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-    obj_ids = na.arange(len(objects))
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
@@ -527,14 +525,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
             datatype == "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
             datatype == "array"
         elif isinstance(data, types.ListType):
             datatype == "list"
@@ -551,14 +549,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -583,16 +581,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -610,7 +608,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -618,7 +616,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -638,7 +636,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -745,7 +743,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -753,11 +751,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -777,8 +775,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -804,9 +802,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -818,7 +816,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -832,7 +830,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -843,11 +841,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -869,7 +867,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -928,13 +926,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -945,7 +943,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -975,13 +973,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1002,13 +1000,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
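
The 3D partitioning above carves the domain into a cc[0] x cc[1] x cc[2] brick of subvolumes and reads each rank's edges out of np.mgrid by slicing two consecutive grid lines per axis. Stripped of MPI, the indexing looks roughly like this (rank layout and domain are made up):

    import numpy as np

    cc = (2, 2, 3)                        # e.g. 12 ranks arranged 2 x 2 x 3
    mi = 7                                # this rank's index within that layout
    cx, cy, cz = np.unravel_index(mi, cc)
    LE, RE = np.zeros(3), np.ones(3)      # unit domain for the sketch
    x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
    y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
    z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
    print np.array([x[0], y[0], z[0]]), np.array([x[1], y[1], z[1]])   # this rank's left/right edges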


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
         raise RunTimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
         raise RunTimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/utilities/tests/test_kdtrees.py
--- /dev/null
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -0,0 +1,96 @@
+"""
+Unit test the kD trees in yt.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: U of Colorado
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Stephen Skory.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+
+try:
+    from yt.utilities.kdtree import \
+        chainHOP_tags_dens, \
+        create_tree, fKD, find_nn_nearest_neighbors, \
+        free_tree, find_chunk_nearest_neighbors
+except ImportError:
+    mylog.debug("The Fortran kD-Tree did not import correctly.")
+
+from yt.utilities.spatial import cKDTree
+
+def setup():
+    pass
+
+def test_fortran_tree():
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    fKD.pos[0, :] = points
+    fKD.pos[1, :] = points
+    fKD.pos[2, :] = points
+    fKD.qv = np.empty(3, dtype='float64')
+    fKD.dist = np.empty(4, dtype='float64')
+    fKD.tags = np.empty(4, dtype='int64')
+    fKD.nn = 4
+    fKD.sort = True
+    create_tree(0)
+    # Now we check to make sure that we find the correct nearest neighbors,
+    # which get stored in dist and tags.
+    fKD.qv[:] = 0.999
+    find_nn_nearest_neighbors()
+    # Fix fortran counting.
+    fKD.tags -= 1
+    # Clean up before the tests.
+    free_tree(0)
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(fKD.dist, dist)
+    assert_array_equal(fKD.tags, tags)
+
+def test_cython_tree():
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    pos = np.empty((4, 3), dtype='float64')
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    pos[:, 0] = points
+    pos[:, 1] = points
+    pos[:, 2] = points
+    kdtree = cKDTree(pos, leafsize = 2)
+    qv = np.array([0.999]*3)
+    res = kdtree.query(qv, 4, period=[1.,1.,1])
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(res[0], dist)
+    assert_array_equal(res[1], tags)
+
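
The expected values hard-coded in both tests above are squared distances under
periodic boundary conditions with period 1.0; a minimal sketch (not part of the
commit) that reproduces them from the four hand-picked points:

    import numpy as np
    points = np.array([0.01, 0.5, 0.98, 0.99])
    qv = 0.999
    d = np.abs(points - qv)
    d = np.minimum(d, 1.0 - d)      # wrap around the unit box
    sq_dist = 3.0 * d**2            # same offset along x, y and z
    order = np.argsort(sq_dist)     # -> [3, 0, 2, 1], matching `tags`
    print sq_dist[order]            # -> 2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01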


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]
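
The Kamae and cubehelix maps above are registered from a matplotlib segment
dictionary in which each channel is a sequence of (x, y0, y1) triples; a minimal
sketch of the same pattern using matplotlib directly (the ramp functions here are
illustrative only, not part of the commit):

    import numpy as np
    import matplotlib.colors as mcolors

    _vs = np.linspace(0, 1, 255)
    red, grn, blu = _vs, _vs**2, np.sqrt(_vs)
    cdict = {'red':   zip(_vs, red, red),
             'green': zip(_vs, grn, grn),
             'blue':  zip(_vs, blu, blu)}
    demo_cmap = mcolors.LinearSegmentedColormap('demo', cdict, 256)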


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata == None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata == None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,8 +28,9 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -352,7 +353,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -374,7 +375,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
@@ -384,3 +385,28 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        self[item] = buff.swapaxes(0,1)
+        return self[item]
+
+
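
The new OffAxisProjectionFixedResolutionBuffer expects a six-element bounds tuple
(xmin, xmax, ymin, ymax, zmin, zmax) from the plot window and turns it into the
3D width passed to off_axis_projection(); a minimal sketch of that step (the
numbers are placeholders):

    # bounds as produced when a depth is supplied along with the window width
    bounds = (-0.1, 0.1, -0.15, 0.15, -0.2, 0.2)
    width = (bounds[1] - bounds[0],   # extent along the image-plane x axis
             bounds[3] - bounds[2],   # extent along the image-plane y axis
             bounds[5] - bounds[4])   # depth integrated through
    # width == (0.2, 0.3, 0.4), in code units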


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer
@@ -163,7 +163,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -283,7 +283,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -297,13 +297,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, min=0, max=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,26 +97,26 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -141,19 +141,18 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:
@@ -229,14 +228,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +278,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +294,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",
@@ -380,7 +379,7 @@
                          take_log=True)
     """
     import matplotlib
-    from ._mpl_imports import *
+    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 
     # If this is rendered as log, then apply now.
     if take_log:
@@ -421,21 +420,22 @@
     else:
         dpi = None
 
-    if filename[-4:] == '.png':
-        suffix = ''
-    else:
+    suffix = os.path.splitext(filename)[1]
+
+    if suffix == '':
         suffix = '.png'
         filename = "%s%s" % (filename, suffix)
-    mylog.info("Saving plot %s", fn)
+    mylog.info("Saving plot %s", filename)
     if suffix == ".png":
         canvas = FigureCanvasAgg(fig)
     elif suffix == ".pdf":
         canvas = FigureCanvasPdf(fig)
     elif suffix in (".eps", ".ps"):
-        canvas = FigureCanvasPS
+        canvas = FigureCanvasPS(fig)
     else:
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
+
     canvas.print_figure(filename)
     return filename
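
With the changes above, write_bitmap() no longer transposes by default and a
three-channel array simply gets an opaque alpha channel appended; a short usage
sketch (the array contents and file names are arbitrary):

    import numpy as np
    from yt.visualization.image_writer import write_bitmap

    # a 64x64 RGB ramp; float input is clipped against max_val and scaled to uint8
    bitmap = np.zeros((64, 64, 3), dtype="float64")
    bitmap[..., 0] = np.linspace(0.0, 1.0, 64)[:, None]
    write_bitmap(bitmap, "ramp.png")                     # written as-is
    write_bitmap(bitmap, "ramp_T.png", transpose=True)   # axes 0 and 1 swapped first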
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1533,7 +1533,7 @@
     @rootonly
     def save_book(self, filename, author = None, title = None, keywords = None,
                   subject = None, creator = None, producer = None,
-                  creation_data = None):
+                  creation_date = None):
         r"""Save a multipage PDF of all the current plots, rather than
         individual image files.
 
@@ -1580,15 +1580,21 @@
         >>> dd = pf.h.all_data()
         >>> pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
         ...                     weight = None)
-        >>> pc.save_book("my_plots.pdf", author="Matthew Turk", 
+        >>> pc.save_book("my_plots.pdf", author="Yours Truly",
         ...              title="Fun plots")
         """
         from matplotlib.backends.backend_pdf import PdfPages
         outfile = PdfPages(filename)
         for plot in self.plots:
             plot.save_to_pdf(outfile)
-        if info is not None:
-            outfile._file.writeObject(outfile._file.infoObject, info)
+        pdf_keys = ['Title', 'Author', 'Subject', 'Keywords', 'Creator',
+            'Producer', 'CreationDate']
+        pdf_values = [title, author, subject, keywords, creator, producer,
+            creation_date]
+        metadata = outfile.infodict()
+        for key, val in zip(pdf_keys, pdf_values):
+            if isinstance(val, str):
+                metadata[key] = val
         outfile.close()
 
 def wrap_pylab_newplot(func):
@@ -1878,7 +1884,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)
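
save_book() now fills in the PDF metadata through matplotlib's PdfPages.infodict()
rather than poking at the private writer object; a self-contained sketch of that
mechanism (the figure contents and metadata values are placeholders):

    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages

    outfile = PdfPages("book.pdf")
    fig = plt.figure()
    plt.plot([0, 1], [0, 1])
    outfile.savefig(fig)                 # one page per saved figure
    metadata = outfile.infodict()        # the PDF document information dictionary
    metadata['Title'] = "Fun plots"
    metadata['Author'] = "Yours Truly"
    outfile.close()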


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +52,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -80,11 +80,11 @@
     def pixel_scale(self,plot):
         x0, x1 = plot.xlim
         xx0, xx1 = plot._axes.get_xlim()
-        dx = (xx0 - xx1)/(x1 - x0)
+        dx = (xx1 - xx0)/(x1 - x0)
         
         y0, y1 = plot.ylim
         yy0, yy1 = plot._axes.get_ylim()
-        dy = (yy0 - yy1)/(y1 - y0)
+        dy = (yy1 - yy0)/(y1 - y0)
 
         return (dx,dy)
 
@@ -146,7 +146,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -195,10 +197,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -250,12 +252,12 @@
         #appropriate shift to the coppied field.  
 
         #set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +271,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -295,65 +297,69 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
         """
-        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+        annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
-        *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        Grids must be wider than *min_pix_ids*, otherwise the ID will not be drawn.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
-        self.annotate = annotate # put grid numbers in the corner.
+        self.min_pix_ids = min_pix_ids
+        self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
-        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         xi = x_dict[plot.data.axis]
         yi = y_dict[plot.data.axis]
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
+        (dx, dy) = self.pixel_scale(plot)
+        (xpix, ypix) = plot.image._A.shape
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
-            verts = na.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+            left_edge_x = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_y = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+            if visible.nonzero()[0].size == 0: continue
+            verts = np.array(
+                [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
+                 (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
-            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
-            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
-            if self.annotate:
-                ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_px)):
-                    plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
+            if self.draw_ids:
+                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
+                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                active_ids = np.unique(plot.data['GridIndices'])
+                for i in np.where(visible_ids)[0]:
+                    plot._axes.text(
+                        left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
+                        left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
+                        "%d" % active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):
@@ -414,20 +420,23 @@
                              plot.data[self.field_y],
                              int(nx), int(ny),
                            (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
                       self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
         lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        mag = np.sqrt(pixX**2 + pixY**2)
+        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
         dt = 1.0 / (self.nsample-1)
         for i in range(1,self.nsample):
             xt = lines[i-1,0,:,:]
             yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
+            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
@@ -452,6 +461,30 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
+class TimeCallback(PlotCallback):
+    _type_name = "time"
+    def __init__(self, format_code='10.7e'):
+        """
+        This annotates the plot with the current simulation time.
+        For now, the time is displayed in seconds.
+        *format_code* can optionally be set, allowing a custom
+        C-style format code for the time display.
+        """
+        self.format_code = format_code
+        PlotCallback.__init__(self)
+    
+    def __call__(self, plot):
+        current_time = plot.pf.current_time/plot.pf['Time']
+        timestring = format(current_time,self.format_code)
+        base = timestring[:timestring.find('e')]
+        exponent = timestring[timestring.find('e')+1:]
+        if exponent[0] == '+':
+            exponent = exponent[1:]
+        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
+        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
+        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
+        plot._axes.add_artist(at)
+
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -489,18 +522,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -607,7 +640,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -622,8 +655,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -659,7 +692,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -673,7 +706,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -695,9 +728,13 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_plot(plot, self.pos)
+        x, y = self.convert_to_plot(plot, pos)
         dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
@@ -717,12 +754,13 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-
-
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         width,height = plot.image._A.shape
-        x,y = self.convert_to_plot(plot, self.pos)
-        x,y = x/width,y/height
-
+        x,y = self.convert_to_plot(plot, pos)
+        
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):
@@ -817,7 +855,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1065,8 +1103,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)
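
The new TimeCallback builds its mathtext label by splitting the exponential-format
time string at 'e'; a standalone sketch of that formatting step (the time value
here is arbitrary):

    current_time = 3.1557e13                          # seconds, for illustration
    format_code = '10.7e'
    timestring = format(current_time, format_code)    # '3.1557000e+13'
    base = timestring[:timestring.find('e')]          # '3.1557000'
    exponent = timestring[timestring.find('e')+1:].lstrip('+')  # '13'
    label = r'$t\/=\/' + base + r'\times\,10^{' + exponent + r'}\, \rm{s}$'
    # rendered by matplotlib's mathtext as t = 3.1557000 x 10^13 s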


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,19 +26,25 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+try:
+    from matplotlib.pyparsing import ParseFatalException
+except ImportError:
+    from pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
@@ -101,7 +107,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -120,7 +129,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -152,7 +161,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -162,7 +171,14 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -171,16 +187,19 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = na.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
-    width = width/pf.domain_width.min()
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
+        width = width
+    
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
 
 class PlotWindow(object):
@@ -189,8 +208,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -242,20 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
+        if hasattr(self,'zlim'):
+            bounds = self.xlim+self.ylim+self.zlim
+        else:
             bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
-                                                  periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
-            raise RuntimeError("Failed to repixelize.")
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -296,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -342,42 +356,75 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if hasattr(self,'zlim'):
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
 
         parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = width[0]
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -434,10 +481,11 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._unit = None
+        self._callbacks = []
+        self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
                 self._field_transform[field] = log_transform
@@ -554,13 +602,52 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
+    @invalidate_plot
+    def set_axes_unit(self, unit_name):
+        r"""Set the unit for display on the x and y axes of the image.
+
+        Parameters
+        ----------
+        unit_name : string
+            A unit, available for conversion in the parameter file, that the
+            image extents will be displayed in.  If set to None, any previously
+            set unit is cleared and an appropriate default is chosen
+            automatically.  If unit_name is '1', 'u', or 'unitary', the units
+            are not displayed and only the axis names are shown.
+
+        Raises
+        ------
+        YTUnitNotRecognized
+            If the unit is not known, this will be raised.
+
+        Examples
+        --------
+
+        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p.show()
+        >>> p.set_axes_unit("kpc")
+        >>> p.show()
+        >>> p.set_axes_unit(None)
+        >>> p.show()
+        """
+        # the unit could live in either conversion_factors or units; a KeyError means it is in neither
+        try:
+            self.pf[unit_name]
+        except KeyError: 
+            if unit_name is not None:
+                raise YTUnitNotRecognized(unit_name)
+        self._unit = unit_name
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        unit = get_smallest_appropriate_unit(x_width, self.pf)
+        if self._unit is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+        else:
+            unit = self._unit
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -608,6 +695,15 @@
 
     """
     _current_field = None
+    _frb_generator = None
+    _plot_type = None
+
+    def __init__(self, *args, **kwargs):
+        if self._frb_generator is None:
+            self._frb_generator = kwargs.pop("frb_generator")
+        if self._plot_type is None:
+            self._plot_type = kwargs.pop("plot_type")
+        PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -647,30 +743,55 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
+            if not md['unit'] in ['1', 'u', 'unitary']:
+                axes_unit_label = '\/\/('+md['unit']+')'
+            else:
+                axes_unit_label = ''
+
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
-                          r'\/\/('+md['unit'].encode('string-escape')+r')}$' for i in (0,1)]
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
+                        axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$',
-                          r'$\rm{Image\/y}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$']
-                
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
+            field_name = self.data_source.pf.field_info[f].display_name
+
+            if field_name is None:
+                field_name = r'$\rm{'+f+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
+
+            try:
+                parser.parse(r'$'+md['units']+r'$')
+            except ParseFatalException, err:
+                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+f.encode('string-escape')+r'}$'
+                label = field_name
             else:
-                label = r'$\rm{'+f.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 
@@ -719,7 +840,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self, name=None, mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -727,22 +848,28 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
+        names = []
+        if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        elif name.endswith('.png'):
-            return v.save(name)
+        suffix = os.path.splitext(name)[1]
+        if suffix != '':
+            for k, v in self.plots.iteritems():
+                names.append(v.save(name,mpl_kwargs))
+            return names
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
+        type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
-        names = []
         for k, v in self.plots.iteritems():
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
@@ -751,15 +878,19 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n,mpl_kwargs))
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14        
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.
@@ -785,7 +916,11 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -805,11 +940,12 @@
              or the axis name itself
         fields : string
              The name of the field(s) to be plotted.
-        center : two or three-element vector of sequence floats, 'c', or 'center'
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
              The coordinate of the center of the image.  If left blank,
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
-             the middle of the domain.
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -829,6 +965,11 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
              in code units.  
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units are not displayed
+            and only the axis names are shown.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -850,9 +991,13 @@
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    _plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -873,11 +1018,12 @@
              or the axis name itself
         fields : string
             The name of the field(s) to be plotted.
-        center : A two or three-element vector of sequence floats, 'c', or 'center'
-            The coordinate of the center of the image.  If left blanck,
-            the image centers on the location of the maximum density
-            cell.  If set to 'c' or 'center', the plot is centered on
-            the middle of the domain.
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
+             The coordinate of the center of the image.  If left blank,
+             the image centers on the location of the maximum density
+             cell.  If set to 'c' or 'center', the plot is centered on
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -897,6 +1043,11 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
              in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units are not displayed
+            and only the axis names are shown.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -922,9 +1073,14 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -952,6 +1108,11 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units are not displayed
+            and only the axis names are shown.
         north-vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -963,6 +1124,96 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    _key_fields = []
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level : int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', the units are not displayed
+            and only the axis names are shown.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+        # Hard-coding the resolution for now
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>
@@ -983,6 +1234,7 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
+    _frb_generator = FixedResolutionBuffer
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -1007,7 +1259,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1051,12 +1303,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1075,8 +1327,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1100,7 +1352,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
@@ -1142,30 +1394,81 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
-        # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        fsize, axrect, caxrect = self._get_best_layout(size)
+        
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The requested plot has a very narrow aspect ratio.  '
+                          'There is a good chance the plot will not look very good; '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs, canvas = None):
+        suffix = os.path.splitext(name)[1]
+        
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+        mylog.info("Saving plot %s", name)
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
+        else:
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
 
-    def save(self, name, canvas = None):
-        if name[-4:] == '.png':
-            suffix = ''
-        else:
-            suffix = '.png'
-        fn = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", fn)
-        if canvas is None:
-            if suffix == ".png":
-                canvas = FigureCanvasAgg(self.figure)
-            elif suffix == ".pdf":
-                canvas = FigureCanvasPdf(self.figure)
-            elif suffix in (".eps", ".ps"):
-                canvas = FigureCanvasPS
-            else:
-                mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-                canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
-        return fn
+
+        canvas.print_figure(name,**mpl_kwargs)
+        return name
+
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
 
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
@@ -1188,3 +1491,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+
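
The plot_window changes above add per-axis widths, an axes_unit keyword on the
plot classes, and matplotlib keyword passthrough on save().  A minimal usage
sketch (not part of the changeset; the dataset path is hypothetical and the
imports assume the yt 2.x layout used elsewhere in these diffs):

    from yt.mods import *                       # provides load()
    from yt.visualization.plot_window import SlicePlot, ProjectionPlot

    pf = load('DD0010/DD0010')                  # hypothetical dataset

    slc = SlicePlot(pf, 'z', 'Density', center='c', width=(10, 'kpc'),
                    axes_unit='kpc')            # tick labels in kpc
    slc.set_width(((10, 'kpc'), (15, 'kpc')))   # 10 kpc along x, 15 kpc along y
    slc.set_axes_unit(None)                     # back to automatic units
    slc.save('slice_z.png', mpl_kwargs={'bbox_inches': 'tight'})

    prj = ProjectionPlot(pf, 'x', 'Density', weight_field=None, axes_unit='Mpc')
    prj.save()                                  # filename is built from the pf name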


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.logspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.logspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -306,7 +306,7 @@
             nz = (self.profile[self._current_field] > 0)
             mi = self.profile[self._current_field][nz].min()
         else:
-            mi = self.profile[self._current_field][nz].min()
+            mi = self.profile[self._current_field].min()
         ma = self.profile[self._current_field].max()
         cbar.bounds = (mi, ma)
         cbar.cmap = 'algae'
@@ -338,11 +338,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -369,7 +369,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -378,7 +378,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)
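
Besides the numpy renaming, one profile_plotter hunk above changes the branch
that computes the colorbar's lower bound so the nonzero-masked minimum is only
used where it is needed.  A small standalone illustration in plain numpy (not
taken from the changeset) of why a log-scaled colorbar needs the smallest
positive value while a linear one can take the plain minimum:

    import numpy as np

    data = np.array([0.0, 1.0e-3, 0.5, 2.0, 7.0])

    # log scale: mask out non-positive values before taking the minimum,
    # otherwise log10(0) would ruin the lower bound
    nz = data > 0
    log_bounds = (data[nz].min(), data.max())   # (0.001, 7.0)

    # linear scale: the plain minimum is fine, zeros included
    lin_bounds = (data.min(), data.max())       # (0.0, 7.0)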


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_passthrough
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs == 1) and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs
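
Aside from the renaming, the LogLocator shown above places ticks at integer
powers of the base, optionally multiplied by sub-decade factors.  A compact
standalone sketch of that idea in plain numpy (an illustration, not a drop-in
replacement for the class):

    import math
    import numpy as np

    def decade_ticks(vmin, vmax, base=10.0, subs=(1.0,)):
        # decades spanning the data range, e.g. 0.03..500 covers 10**-2..10**3
        lo, hi = math.log(vmin, base), math.log(vmax, base)
        decades = np.arange(math.floor(lo), math.ceil(hi) + 1)
        ticks = np.concatenate([sub * base ** decades for sub in subs])
        return np.sort(ticks)

    decade_ticks(0.03, 500.0)
    # -> [0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]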


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -37,6 +37,7 @@
     arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
+from yt.data_objects.api import ImageArray
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -167,12 +168,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -195,7 +196,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -226,10 +227,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -282,39 +283,44 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
+        print sampler, kwargs
         return sampler
 
     def finalize_image(self, image):
@@ -326,13 +332,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -342,15 +348,21 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.comm.rank is 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio * image.std())
-            else:
-                write_bitmap(image, fn)
-
+            image.write_png(fn, clip_ratio=clip_ratio)
 
     def initialize_source(self):
         return self.volume.initialize_source()
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'east_vector':self.orienter.unit_vectors[0],
+                     'north_vector':self.orienter.unit_vectors[1],
+                     'normal_vector':self.orienter.unit_vectors[2],
+                     'width':self.width,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -385,7 +397,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clip_ratio, image)
         return image
 
@@ -510,30 +524,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +573,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +582,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +597,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        north_vector = self.orienter.unit_vectors[1]
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +627,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -665,7 +679,7 @@
 class PerspectiveCamera(Camera):
     expand_factor = 1.0
     def __init__(self, *args, **kwargs):
-        expand_factor = kwargs.pop('expand_factor', 1.0)
+        self.expand_factor = kwargs.pop('expand_factor', 1.0)
         Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
@@ -676,12 +690,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,22 +707,43 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
+    def _render(self, double_check, num_threads, image, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+
+        pbar.finish()
+        image = sampler.aimage
+        self.finalize_image(image)
+        return image
+
+
     def finalize_image(self, image):
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -720,19 +755,28 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples
@@ -747,20 +791,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +815,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -787,6 +831,15 @@
 
         return image
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'center':self.center,
+                     'radius':self.radius,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
@@ -814,7 +867,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clim, image, label = label)
         return image
 
@@ -823,14 +878,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +907,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +935,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +951,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -1014,10 +1069,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -1036,7 +1091,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -1047,13 +1102,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -1067,13 +1122,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1167,7 +1222,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1185,7 +1240,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1243,18 +1298,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1274,17 +1329,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1307,9 +1362,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1322,15 +1377,16 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
 
         if self.image is not None:
             del self.image
+        image = ImageArray(image,
+                           info=self.get_information())
         self.image = image
-       
         return image
 
     def save_image(self, fn, clip_ratio=None):
@@ -1348,7 +1404,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1391,7 +1447,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1401,9 +1457,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1428,7 +1484,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1460,10 +1516,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1505,7 +1561,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1524,7 +1580,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1536,8 +1592,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1545,14 +1601,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1581,15 +1637,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1647,12 +1703,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1686,8 +1742,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1709,7 +1765,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1735,7 +1791,9 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
 
         self.save_image(fn, clip_ratio, image)
 
@@ -1746,7 +1804,8 @@
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1801,12 +1860,13 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated, 
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z != None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times == None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time == None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 


diff -r 8dd74a5ab7df83179c8432c064eeda8989830877 -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal


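A compact sketch of the pattern the camera.py hunks above introduce: _render() still returns a plain array, and snapshot() wraps it in an ImageArray together with an info dict (see get_information()) so the provenance travels with the image. SimpleImageArray below is an illustrative stand-in written for this summary, not yt's actual ImageArray class.

    import numpy as np

    class SimpleImageArray(np.ndarray):
        """Illustrative stand-in for yt's ImageArray: an ndarray with metadata."""
        def __new__(cls, input_array, info=None):
            obj = np.asarray(input_array).view(cls)
            obj.info = info if info is not None else {}
            return obj
        def __array_finalize__(self, obj):
            # keep the metadata when numpy makes views or slices of the array
            self.info = getattr(obj, 'info', {})

    # usage mirrors the changed snapshot() methods:
    #     image = ImageArray(self._render(...), info=self.get_information())
    img = SimpleImageArray(np.zeros((4, 4, 3)), info={'fields': ['Density']})
    print img.info['fields']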

https://bitbucket.org/yt_analysis/yt/changeset/79a04b9de7bf/
changeset:   79a04b9de7bf
branch:      yt
user:        Christopher Moody
date:        2012-10-12 23:55:16
summary:     modified io to use a lookup dictionary
affected #:  1 file
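The diff below replaces a long chain of field-name if tests with a single dictionary lookup. A toy version of the same refactor (the two field names and the grid argument are placeholders, not the full yt ART field list):

    # before: one branch per field name
    def read_field_if_chain(grid, field):
        if field == 'particle_mass':
            return grid.particle_mass
        if field == 'particle_age':
            return grid.particle_age
        raise KeyError("Should have matched one of the particle fields...")

    # after: build the mapping and index into it; unknown fields raise KeyError
    def read_field_lookup(grid, field):
        fields = {'particle_mass': grid.particle_mass,
                  'particle_age': grid.particle_age}
        return fields[field]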

diff -r 8780c913832f5e4855c2cbc68fc71cdb02859d0f -r 79a04b9de7bf6bf486a925f210c548404070d680 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,61 +122,35 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        if field == 'particle_index':
-            return grid.particle_id
-        if field == 'particle_type':
-            return grid.particle_type
-        if field == 'particle_position_x':
-            return grid.particle_position_x
-        if field == 'particle_position_y':
-            return grid.particle_position_y
-        if field == 'particle_position_z':
-            return grid.particle_position_z
-        if field == 'particle_age':
-            return grid.particle_age
-        if field == 'particle_mass':
-            return grid.particle_mass
-        if field == 'particle_mass_initial':
-            return grid.particle_mass_initial
-        if field == 'particle_metallicity':
-            return grid.particle_metallicity
-        if field == 'particle_velocity_x':
-            return grid.particle_velocity_x
-        if field == 'particle_velocity_y':
-            return grid.particle_velocity_y
-        if field == 'particle_velocity_z':
-            return grid.particle_velocity_z
+
+        fields={'particle_index': grid.particle_id,
+        'particle_type':grid.particle_type,
+        'particle_position_x':grid.particle_position_x,
+        'particle_position_y':grid.particle_position_y,
+        'particle_position_z':grid.particle_position_z,
+        'particle_age':grid.particle_age,
+        'particle_mass':grid.particle_mass,
+        'particle_mass_initial':grid.particle_mass_initial,
+        'particle_metallicity':grid.particle_metallicity,
+        'particle_velocity_x':grid.particle_velocity_x,
+        'particle_velocity_y':grid.particle_velocity_y,
+        'particle_velocity_z':grid.particle_velocity_z,
         
         #stellar fields
-        if field == 'star_position_x':
-            return grid.star_position_x
-        if field == 'star_position_y':
-            return grid.star_position_y
-        if field == 'star_position_z':
-            return grid.star_position_z
-        if field == 'star_mass':
-            return grid.star_mass
-        if field == 'star_velocity_x':
-            return grid.star_velocity_x
-        if field == 'star_velocity_y':
-            return grid.star_velocity_y
-        if field == 'star_velocity_z':
-            return grid.star_velocity_z
-        if field == 'star_age':
-            return grid.star_age
-        if field == 'star_metallicity':
-            return grid.star_metallicity1 +\
-                   grid.star_metallicity2
-        if field == 'star_metallicity1':
-            return grid.star_metallicity1
-        if field == 'star_metallicity2':
-            return grid.star_metallicity2
-        if field == 'star_mass_initial':
-            return grid.star_mass_initial
-        if field == 'star_mass':
-            return grid.star_mass
-        
-        raise 'Should have matched one of the particle fields...'
+        'star_position_x':grid.star_position_x,
+        'star_position_y':grid.star_position_y,
+        'star_position_z':grid.star_position_z,
+        'star_mass':grid.star_mass,
+        'star_velocity_x':grid.star_velocity_x,
+        'star_velocity_y':grid.star_velocity_y,
+        'star_velocity_z':grid.star_velocity_z,
+        'star_age':grid.star_age,
+        'star_metallicity':grid.star_metallicity1 + grid.star_metallicity2,
+        'star_metallicity1':grid.star_metallicity1,
+        'star_metallicity2':grid.star_metallicity2,
+        'star_mass_initial':grid.star_mass_initial,
+        'star_mass':grid.star_mass}
+        return fields[field] 
 
         
     def _read_data_set(self, grid, field):



https://bitbucket.org/yt_analysis/yt/changeset/ff309c8b0d07/
changeset:   ff309c8b0d07
branch:      yt
user:        Christopher Moody
date:        2012-10-13 00:04:29
summary:     putting field dict in the Grid definition not in the IO
affected #:  1 file
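This changeset and the next move the field mapping onto the grid object itself, leaving the IO handler with a single lookup. A simplified sketch under assumed names (not the real yt classes):

    import numpy as np

    class ARTGridSketch(object):
        """Toy grid: owns the name -> array mapping, built at construction."""
        def __init__(self):
            self.particle_mass = np.array([])
            self.particle_age = np.array([])
            # note: the dict stores references to the arrays as they exist now;
            # rebinding self.particle_mass later is not reflected in field_dict
            self.field_dict = {'particle_mass': self.particle_mass,
                               'particle_age': self.particle_age}

    class IOHandlerSketch(object):
        def _read_particle_field(self, grid, field):
            return grid.field_dict[field]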

diff -r 79a04b9de7bf6bf486a925f210c548404070d680 -r ff309c8b0d0740a6fa3acc175cb45cf122ec8f7d yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -119,6 +119,34 @@
         self.star_metallicity2 = na.array([])
         self.star_mass_initial = na.array([])
         self.star_mass = na.array([])
+
+        self.field_dict = { 'particle_index': self.particle_id,
+            'particle_type':self.particle_type,
+            'particle_position_x':self.particle_position_x,
+            'particle_position_y':self.particle_position_y,
+            'particle_position_z':self.particle_position_z,
+            'particle_age':self.particle_age,
+            'particle_mass':self.particle_mass,
+            'particle_mass_initial':self.particle_mass_initial,
+            'particle_metallicity':self.particle_metallicity,
+            'particle_velocity_x':self.particle_velocity_x,
+            'particle_velocity_y':self.particle_velocity_y,
+            'particle_velocity_z':self.particle_velocity_z,
+            
+            #stellar fields
+            'star_position_x':self.star_position_x,
+            'star_position_y':self.star_position_y,
+            'star_position_z':self.star_position_z,
+            'star_mass':self.star_mass,
+            'star_velocity_x':self.star_velocity_x,
+            'star_velocity_y':self.star_velocity_y,
+            'star_velocity_z':self.star_velocity_z,
+            'star_age':self.star_age,
+            'star_metallicity':self.star_metallicity1 + self.star_metallicity2,
+            'star_metallicity1':self.star_metallicity1,
+            'star_metallicity2':self.star_metallicity2,
+            'star_mass_initial':self.star_mass_initial,
+            'star_mass':self.star_mass}
          
         #if child_mask is not None:
         #    self._set_child_mask(child_mask)



https://bitbucket.org/yt_analysis/yt/changeset/f529db5c7531/
changeset:   f529db5c7531
branch:      yt
user:        Christopher Moody
date:        2012-10-13 00:06:42
summary:     Removing the field dict from IO
affected #:  1 file

diff -r ff309c8b0d0740a6fa3acc175cb45cf122ec8f7d -r f529db5c75314f3590137d55ed9c8629adc956be yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,35 +122,7 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-
-        fields={'particle_index': grid.particle_id,
-        'particle_type':grid.particle_type,
-        'particle_position_x':grid.particle_position_x,
-        'particle_position_y':grid.particle_position_y,
-        'particle_position_z':grid.particle_position_z,
-        'particle_age':grid.particle_age,
-        'particle_mass':grid.particle_mass,
-        'particle_mass_initial':grid.particle_mass_initial,
-        'particle_metallicity':grid.particle_metallicity,
-        'particle_velocity_x':grid.particle_velocity_x,
-        'particle_velocity_y':grid.particle_velocity_y,
-        'particle_velocity_z':grid.particle_velocity_z,
-        
-        #stellar fields
-        'star_position_x':grid.star_position_x,
-        'star_position_y':grid.star_position_y,
-        'star_position_z':grid.star_position_z,
-        'star_mass':grid.star_mass,
-        'star_velocity_x':grid.star_velocity_x,
-        'star_velocity_y':grid.star_velocity_y,
-        'star_velocity_z':grid.star_velocity_z,
-        'star_age':grid.star_age,
-        'star_metallicity':grid.star_metallicity1 + grid.star_metallicity2,
-        'star_metallicity1':grid.star_metallicity1,
-        'star_metallicity2':grid.star_metallicity2,
-        'star_mass_initial':grid.star_mass_initial,
-        'star_mass':grid.star_mass}
-        return fields[field] 
+        return grid.field_dict[field]
 
         
     def _read_data_set(self, grid, field):



https://bitbucket.org/yt_analysis/yt/changeset/2eea26144564/
changeset:   2eea26144564
branch:      yt
user:        Christopher Moody
date:        2012-10-13 01:09:01
summary:     na->np
affected #:  1 file
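Besides the mechanical na -> np substitution, the diff below also renames the grid constructor's np=0 keyword to nop=0. A tiny illustration of why (function and argument names are made up for this example):

    import numpy as np

    def count_particles_shadowed(np=0):
        # with numpy imported as np, this argument hides the module here
        return np              # the integer argument, not the numpy module

    def count_particles(nop=0):
        # renamed keyword, matching the np -> nop change in the ART grid __init__
        return np.arange(nop)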

diff -r f529db5c75314f3590137d55ed9c8629adc956be -r 2eea261445644edc38977860266ce696d8ce9717 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -84,7 +84,7 @@
     _id_offset = 0
 
     def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
-            child_mask=None,np=0):
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
         start_index =start_index 
@@ -97,28 +97,28 @@
         self.LeftEdge = le
         self.RightEdge = re
         self.ActiveDimensions = gd
-        self.NumberOfParticles=np
-        self.particle_type = na.array([])
-        self.particle_id= na.array([])
-        self.particle_age= na.array([])
-        self.particle_position_x = na.array([])
-        self.particle_position_y = na.array([])
-        self.particle_position_z = na.array([])
-        self.particle_velocity_x = na.array([])
-        self.particle_velocity_y = na.array([])
-        self.particle_velocity_z = na.array([])
-        self.particle_mass= na.array([])
-        self.star_position_x = na.array([])
-        self.star_position_y = na.array([])
-        self.star_position_z = na.array([])
-        self.star_velocity_x = na.array([])
-        self.star_velocity_y = na.array([])
-        self.star_velocity_z = na.array([])
-        self.star_age = na.array([])
-        self.star_metallicity1 = na.array([])
-        self.star_metallicity2 = na.array([])
-        self.star_mass_initial = na.array([])
-        self.star_mass = na.array([])
+        self.NumberOfParticles=nop
+        self.particle_type = np.array([])
+        self.particle_id= np.array([])
+        self.particle_age= np.array([])
+        self.particle_position_x = np.array([])
+        self.particle_position_y = np.array([])
+        self.particle_position_z = np.array([])
+        self.particle_velocity_x = np.array([])
+        self.particle_velocity_y = np.array([])
+        self.particle_velocity_z = np.array([])
+        self.particle_mass= np.array([])
+        self.star_position_x = np.array([])
+        self.star_position_y = np.array([])
+        self.star_position_z = np.array([])
+        self.star_velocity_x = np.array([])
+        self.star_velocity_y = np.array([])
+        self.star_velocity_z = np.array([])
+        self.star_age = np.array([])
+        self.star_metallicity1 = np.array([])
+        self.star_metallicity2 = np.array([])
+        self.star_mass_initial = np.array([])
+        self.star_mass = np.array([])
 
         self.field_dict = { 'particle_index': self.particle_id,
             'particle_type':self.particle_type,
@@ -203,12 +203,12 @@
         self._setup_field_list()
         
     def _setup_particle_grids(self):
-        grid_particle_count = na.zeros(len(self.grids),dtype='int64')
+        grid_particle_count = np.zeros(len(self.grids),dtype='int64')
         npt = self.pf.particle_position.shape[0]
         if self.pf.do_grid_particles:
             nps = self.pf.star_position.shape[0]
-            grid_indices = na.zeros(nps,dtype='int64')
-            particle_id= na.arange(nps,dtype='int64')
+            grid_indices = np.zeros(nps,dtype='int64')
+            particle_id= np.arange(nps,dtype='int64')
             pbar = get_pbar("Gridding Particles",len(self.grids))
             grid_indices,grid_particle_count,grids_done = \
                     particle_assignment(self.grids,
@@ -227,7 +227,7 @@
                 if gi==0:
                     #attach all the particles to the root grid
                     g.particle_type = self.pf.particle_type
-                    g.particle_id = na.arange(npt)
+                    g.particle_id = np.arange(npt)
                     g.particle_mass = self.pf.particle_mass
                     g.particle_mass_initial = self.pf.particle_mass_initial
                     g.particle_age = self.pf.particle_age
@@ -261,7 +261,7 @@
                 if gi==0:
                     #attach all the particles to the root grid
                     g.particle_type = self.pf.particle_type
-                    g.particle_id = na.arange(npt)
+                    g.particle_id = np.arange(npt)
                     g.particle_mass = self.pf.particle_mass
                     g.particle_mass_initial = self.pf.particle_mass_initial
                     g.particle_age = self.pf.particle_age
@@ -461,7 +461,7 @@
                         eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
             
         
-            mylog.info("Done with level % 2i; max LE %i", level,na.max(left_index))
+            mylog.info("Done with level % 2i; max LE %i", level,np.max(left_index))
             pbar.finish()
             self.proto_grids.append(psgs)
             #print sum(len(psg.grid_file_locations) for psg in psgs)
@@ -489,13 +489,13 @@
                 re = props[1,:].astype('float64')/dds
                 gd = props[2,:].astype('int64')
                 if level==0:
-                    le = na.zeros(3,dtype='float64')
-                    re = na.ones(3,dtype='float64')
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
                     gd = dd
                 self.grid_left_edge[gi,:] = le
                 self.grid_right_edge[gi,:] = re
                 self.grid_dimensions[gi,:] = gd
-                assert na.all(self.grid_left_edge[gi,:]<=1.0)    
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,start_index,
@@ -511,7 +511,7 @@
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
-            nstars = na.diff(lspecies)[-1]
+            nstars = np.diff(lspecies)[-1]
             a = self.pf.parameters['aexpn']
             hubble = self.pf.parameters['hubble']
             ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
@@ -530,10 +530,9 @@
                 if type(self.pf.only_particle_type)==type(5):
                     npa = clspecies[self.pf.only_particle_type]
                     npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            nparticles = np
+            nparticles = npb-npa
             #make sure we aren't going to throw out good particles
-            if not na.all(self.pf.particle_position[npb:]==0.0):
+            if not np.all(self.pf.particle_position[npb:]==0.0):
                 print 'WARNING: unused particles discovered from lspecies'
             self.pf.particle_position   = self.pf.particle_position[npa:npb]
             #do NOT correct by an offset of 1.0
@@ -591,8 +590,8 @@
             pbar.finish()
 
             lparticles = [0,]+list(lspecies)
-            for j,np in enumerate(lparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
+            for j,npi in enumerate(lparticles):
+                mylog.debug('found %i of particle type %i'%(j,npi))
             
             
             do_stars = (self.pf.only_particle_type is None) or \
@@ -606,7 +605,7 @@
                      = read_stars(self.pf.file_star_data)
                 self.pf.nstars_rs = nstars_rs     
                 self.pf.nstars_pa = nstars_pa
-                if not nstars_rs==na.sum(self.pf.particle_type==self.pf.particle_star_index):
+                if not nstars_rs==np.sum(self.pf.particle_type==self.pf.particle_star_index):
                     print 'WARNING!: nstars is inconsistent!'
                 if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
@@ -625,7 +624,7 @@
                     else:
                         ages = spread_ages(ages)
                     idx = self.pf.particle_type == self.pf.particle_star_index    
-                    assert na.sum(idx)==nstars_pa
+                    assert np.sum(idx)==nstars_pa
                     self.pf.star_position = self.pf.particle_position[idx]
                     self.pf.star_velocity = self.pf.particle_velocity[idx]
                     self.pf.particle_age[idx] = ages
@@ -640,7 +639,7 @@
                     self.pf.star_metallicity2 = metallicity2
                     self.pf.star_mass_initial = imass*um
                     self.pf.star_mass = mass*um
-                    self.pf.star_data = na.array([
+                    self.pf.star_data = np.array([
                         self.pf.star_position[:,0],
                         self.pf.star_position[:,1],
                         self.pf.star_position[:,2],
@@ -657,7 +656,7 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((na.arange(pos.shape[0]),pos.T)).T 
         for gi,g in enumerate(grids):    
             self.grids[gi]=g
                     
@@ -669,7 +668,7 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
             pb.update(gi)
@@ -1121,15 +1120,15 @@
     #we may get negative indices or indices outside this grid
     #mask them out
     exp = domain_dimensions*subdiv**this_grid.Level
-    lei= na.floor((pos-this_grid.LeftEdge)*exp).astype('int64')
+    lei= np.floor((pos-this_grid.LeftEdge)*exp).astype('int64')
 
     #now lookup these indices in the child index mask
     #throw out child grids = -1 and particles outside the range
     #default state is to not grid a particle
-    child_idx = na.zeros(lei.shape[0],dtype='int64')-1
+    child_idx = np.zeros(lei.shape[0],dtype='int64')-1
     #remove particles to the left or right of the grid
-    lei_out  = na.any(lei>=this_grid.ActiveDimensions,axis=1)
-    lei_out |= na.any(lei<0,axis=1)
+    lei_out  = np.any(lei>=this_grid.ActiveDimensions,axis=1)
+    lei_out |= np.any(lei<0,axis=1)
     #lookup grids for every particle except the ones to the 
     leio=lei[~lei_out]
     #child_idx[~lei_out]= \
@@ -1140,18 +1139,18 @@
     grid_indices[particle_id[mask]] = child_idx[mask]
     #the number of particles on this grid is equal to those
     #that point to -1
-    grid_particle_count[this_grid.id] = na.sum(~mask)
+    grid_particle_count[this_grid.id] = np.sum(~mask)
     grids_done +=1
     if logger:
         logger.update(grids_done)
 
-    for child_grid_index in na.unique(this_grid.child_index_mask):
+    for child_grid_index in np.unique(this_grid.child_index_mask):
         if child_grid_index == -1: 
             continue
         if grids[child_grid_index].Level == max_level:
             continue
         mask = child_idx == child_grid_index
-        if na.sum(mask)==0:continue
+        if np.sum(mask)==0:continue
         grid_indices,grid_particle_count,grids_done = \
         particle_assignment(grids,grids[child_grid_index],
                 pos[mask],particle_id[mask],
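
Beyond the module-alias rename, this changeset also renames local variables that were called np (a particle count), which shadowed the numpy alias inside those functions. A small sketch of the pitfall, using made-up values:

    import numpy as np

    def allocate_shadowed(npa, npb):
        np = npb - npa                 # BAD: "np" is now an int, not the numpy module
        # a later np.zeros(...) in this scope would raise
        # AttributeError: 'int' object has no attribute 'zeros'
        return np

    def allocate_fixed(npa, npb):
        nparticles = npb - npa         # GOOD: non-colliding name
        return np.zeros(nparticles, dtype='float64')

    print(allocate_fixed(2, 5))        # -> [ 0.  0.  0.]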



https://bitbucket.org/yt_analysis/yt/changeset/559e3b8e5168/
changeset:   559e3b8e5168
branch:      yt
user:        Christopher Moody
date:        2012-10-13 01:09:10
summary:     na->np
affected #:  1 file

diff -r 2eea261445644edc38977860266ce696d8ce9717 -r 559e3b8e51680c4090dd329ea86a60bc8fcd2821 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -257,7 +257,7 @@
     #le = le/2**(root_level-1-level)-1
 
     #try to find the root_level first
-    root_level=na.floor(na.log2(le.max()*1.0/coarse_grid))
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
     root_level = root_level.astype('int64')
 
     #try without the -1
@@ -450,11 +450,11 @@
 
 def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
     #stars are formed in lumps; spread out the ages linearly
-    da= na.diff(ages)
-    assert na.all(da<=0)
+    da= np.diff(ages)
+    assert np.all(da<=0)
     #ages should always be decreasing, and ordered so
-    agesd = na.zeros(ages.shape)
-    idx, = na.where(da<0)
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
     idx+=1 #mark the right edges
     #spread this age evenly out to the next age
     lidx=0
@@ -463,7 +463,7 @@
         n = i-lidx #n stars affected
         rage = ages[i]
         lage = max(rage-spread,0.0)
-        agesd[lidx:i]=na.linspace(lage,rage,n)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
         lidx=i
         #lage=rage
         if logger: logger(i)
@@ -472,5 +472,5 @@
     n = i-lidx #n stars affected
     rage = ages[i]
     lage = max(rage-spread,0.0)
-    agesd[lidx:i]=na.linspace(lage,rage,n)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
     return agesd
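
spread_ages replaces runs of identical formation times (stars are written out in lumps) with a linear ramp ending at the recorded age. A simplified, self-contained sketch of the same idea -- the run detection here is condensed relative to the loop in the diff, and the sample ages and spread are made up:

    import numpy as np

    def spread_ages_sketch(ages, spread=1.0e7):
        ages = np.asarray(ages, dtype='float64')
        assert np.all(np.diff(ages) <= 0)          # ages must be non-increasing
        out = np.empty_like(ages)
        # right edge of each run of identical ages, plus the end of the array
        edges = list(np.where(np.diff(ages) < 0)[0] + 1) + [len(ages)]
        lidx = 0
        for i in edges:
            rage = ages[i - 1]
            lage = max(rage - spread, 0.0)
            out[lidx:i] = np.linspace(lage, rage, i - lidx)  # spread the lump out
            lidx = i
        return out

    print(spread_ages_sketch([5.0e7, 5.0e7, 5.0e7, 2.0e7, 2.0e7]))
    # -> [ 40000000.  45000000.  50000000.  10000000.  20000000.]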



https://bitbucket.org/yt_analysis/yt/changeset/17e4d917be49/
changeset:   17e4d917be49
branch:      yt
user:        Christopher Moody
date:        2012-10-13 01:34:32
summary:     Removed total particles from rockstar.py; it is updated in the Cython code.
Finishing particle handling in ART
affected #:  3 files

diff -r 559e3b8e51680c4090dd329ea86a60bc8fcd2821 -r 17e4d917be49884ab6acb6d7e938102e72945dec yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -113,9 +113,12 @@
         if self.comm.size > 1: 
             self.comm.barrier()            
         tpf = ts.__iter__().next()
-        dd = tpf.h.all_data()
-        total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
-        mylog.info("Found %i halo particles",total_particles)
+        def _particle_count(field,data):
+            return (data["particle_type"]==0).sum()
+        add_field("particle_count",function=_particle_count,particle_type=True)
+        #d = tpf.h.all_data()
+        #total_particles = dd.quantities['TotalQuantity']("particle_count")
+        #mylog.info("Found %i halo particles",total_particles)
         self.total_particles = -1
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
@@ -170,7 +173,8 @@
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, self.dm_type,
+                    len(self.ts), #self.total_particles, 
+                    self.dm_type,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,


diff -r 559e3b8e51680c4090dd329ea86a60bc8fcd2821 -r 17e4d917be49884ab6acb6d7e938102e72945dec yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -310,7 +310,7 @@
         self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
-                       int num_snaps, np.int64_t total_particles,
+                       int num_snaps, #np.int64_t total_particles,
                        int dm_type,
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
@@ -343,7 +343,7 @@
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
-        TOTAL_PARTICLES = total_particles
+        #TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
         
         tpf = self.ts[0]


diff -r 559e3b8e51680c4090dd329ea86a60bc8fcd2821 -r 17e4d917be49884ab6acb6d7e938102e72945dec yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -127,8 +127,6 @@
             'particle_position_z':self.particle_position_z,
             'particle_age':self.particle_age,
             'particle_mass':self.particle_mass,
-            'particle_mass_initial':self.particle_mass_initial,
-            'particle_metallicity':self.particle_metallicity,
             'particle_velocity_x':self.particle_velocity_x,
             'particle_velocity_y':self.particle_velocity_y,
             'particle_velocity_z':self.particle_velocity_z,
@@ -142,7 +140,7 @@
             'star_velocity_y':self.star_velocity_y,
             'star_velocity_z':self.star_velocity_z,
             'star_age':self.star_age,
-            'star_metallicity':self.star_metallicity1 + grid.star_metallicity2,
+            'star_metallicity':self.star_metallicity1 + self.star_metallicity2,
             'star_metallicity1':self.star_metallicity1,
             'star_metallicity2':self.star_metallicity2,
             'star_mass_initial':self.star_mass_initial,
@@ -531,6 +529,7 @@
                     npa = clspecies[self.pf.only_particle_type]
                     npb = clspecies[self.pf.only_particle_type+1]
             nparticles = npb-npa
+            npt = nparticles
             #make sure we aren't going to throw out good particles
             if not np.all(self.pf.particle_position[npb:]==0.0):
                 print 'WARNING: unused particles discovered from lspecies'
@@ -544,14 +543,14 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity  = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(nparticles,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(nparticles,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity  = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(nparticles,dtype='float64')-1
 
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -597,7 +596,7 @@
             do_stars = (self.pf.only_particle_type is None) or \
                        (self.pf.only_particle_type == -1) or \
                        (self.pf.only_particle_type == len(lspecies))
-            self.pf.do_stars = do_stars           
+            self.pf.do_stars = False
             if self.pf.file_star_data and do_stars: 
                 nstars_pa = nstars
                 (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
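
The eager all_data() count is replaced here by a derived particle_count field (to be summed later, as the commented-out TotalQuantity call suggests). The quantity that field computes per chunk is just a boolean sum over the particle-type array; a standalone numpy illustration with made-up data:

    import numpy as np

    particle_type = np.array([0, 0, 1, 0, 2, 0], dtype='uint8')  # hypothetical types
    dm_type = 0                  # the diff hard-codes type 0 for dark matter
    total_particles = np.int64((particle_type == dm_type).sum())
    print(total_particles)       # -> 4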



https://bitbucket.org/yt_analysis/yt/changeset/0ab0943550ef/
changeset:   0ab0943550ef
branch:      yt
user:        Christopher Moody
date:        2012-10-13 01:36:09
summary:     removed commented out lines
affected #:  1 file

diff -r 17e4d917be49884ab6acb6d7e938102e72945dec -r 0ab0943550ef92ea282da807cefea68558aebeea yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -116,9 +116,6 @@
         def _particle_count(field,data):
             return (data["particle_type"]==0).sum()
         add_field("particle_count",function=_particle_count,particle_type=True)
-        #d = tpf.h.all_data()
-        #total_particles = dd.quantities['TotalQuantity']("particle_count")
-        #mylog.info("Found %i halo particles",total_particles)
         self.total_particles = -1
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 



https://bitbucket.org/yt_analysis/yt/changeset/52dca3a2655a/
changeset:   52dca3a2655a
branch:      yt
user:        Christopher Moody
date:        2012-10-19 04:31:25
summary:     Moved the field dict into IO
affected #:  2 files

diff -r 0ab0943550ef92ea282da807cefea68558aebeea -r 52dca3a2655ac496ba65529ba8468ae92e7aed86 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -120,32 +120,6 @@
         self.star_mass_initial = np.array([])
         self.star_mass = np.array([])
 
-        self.field_dict = { 'particle_index': self.particle_id,
-            'particle_type':self.particle_type,
-            'particle_position_x':self.particle_position_x,
-            'particle_position_y':self.particle_position_y,
-            'particle_position_z':self.particle_position_z,
-            'particle_age':self.particle_age,
-            'particle_mass':self.particle_mass,
-            'particle_velocity_x':self.particle_velocity_x,
-            'particle_velocity_y':self.particle_velocity_y,
-            'particle_velocity_z':self.particle_velocity_z,
-            
-            #stellar fields
-            'star_position_x':self.star_position_x,
-            'star_position_y':self.star_position_y,
-            'star_position_z':self.star_position_z,
-            'star_mass':self.star_mass,
-            'star_velocity_x':self.star_velocity_x,
-            'star_velocity_y':self.star_velocity_y,
-            'star_velocity_z':self.star_velocity_z,
-            'star_age':self.star_age,
-            'star_metallicity':self.star_metallicity1 + self.star_metallicity2,
-            'star_metallicity1':self.star_metallicity1,
-            'star_metallicity2':self.star_metallicity2,
-            'star_mass_initial':self.star_mass_initial,
-            'star_mass':self.star_mass}
-         
         #if child_mask is not None:
         #    self._set_child_mask(child_mask)
 


diff -r 0ab0943550ef92ea282da807cefea68558aebeea -r 52dca3a2655ac496ba65529ba8468ae92e7aed86 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -121,7 +121,32 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
+        field_dict = { 'particle_index': grid.particle_id,
+            'particle_type':grid.particle_type,
+            'particle_position_x':grid.particle_position_x,
+            'particle_position_y':grid.particle_position_y,
+            'particle_position_z':grid.particle_position_z,
+            'particle_age':grid.particle_age,
+            'particle_mass':grid.particle_mass,
+            'particle_velocity_x':grid.particle_velocity_x,
+            'particle_velocity_y':grid.particle_velocity_y,
+            'particle_velocity_z':grid.particle_velocity_z,
+            
+            #stellar fields
+            'star_position_x':grid.star_position_x,
+            'star_position_y':grid.star_position_y,
+            'star_position_z':grid.star_position_z,
+            'star_mass':grid.star_mass,
+            'star_velocity_x':grid.star_velocity_x,
+            'star_velocity_y':grid.star_velocity_y,
+            'star_velocity_z':grid.star_velocity_z,
+            'star_age':grid.star_age,
+            'star_metallicity':grid.star_metallicity1 + grid.star_metallicity2,
+            'star_metallicity1':grid.star_metallicity1,
+            'star_metallicity2':grid.star_metallicity2,
+            'star_mass_initial':grid.star_mass_initial,
+            'star_mass':grid.star_mass}
+         
         return grid.field_dict[field]
 
         



https://bitbucket.org/yt_analysis/yt/changeset/ede4ebb071e5/
changeset:   ede4ebb071e5
branch:      yt
user:        Christopher Moody
date:        2012-10-19 05:05:11
summary:     fixed up field access in grids
affected #:  1 file

diff -r 52dca3a2655ac496ba65529ba8468ae92e7aed86 -r ede4ebb071e5d17d10fadbe546191c17336c6eb8 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -146,8 +146,14 @@
             'star_metallicity2':grid.star_metallicity2,
             'star_mass_initial':grid.star_mass_initial,
             'star_mass':grid.star_mass}
-         
-        return grid.field_dict[field]
+        starfield = field.replace('particle','star')
+        psi = grid.pf.particle_star_index
+        if field not in field_dict.keys() and starfield in field_dict.keys():
+            particle_field = np.zeros(grid.particle_mass.shape)                    
+            particle_field[grid.particle_id==psi]=field_dict[starfield]
+            return particle_field
+        else:
+            return field_dict[field]
 
         
     def _read_data_set(self, grid, field):
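
The fix-up above serves particle_* requests from the star_* arrays by scattering the star values into a zero-filled, particle-length array wherever particle_type matches the star index. A standalone sketch with made-up data (a star index of 4 is assumed):

    import numpy as np

    particle_type = np.array([0, 4, 0, 4, 4])          # hypothetical types; 4 marks stars
    star_metallicity = np.array([0.01, 0.02, 0.03])    # one entry per star particle

    particle_metallicity = np.zeros(particle_type.shape)
    particle_metallicity[particle_type == 4] = star_metallicity
    print(particle_metallicity)   # -> [ 0.    0.01  0.    0.02  0.03]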



https://bitbucket.org/yt_analysis/yt/changeset/533c263cacf7/
changeset:   533c263cacf7
branch:      yt
user:        Christopher Moody
date:        2012-10-19 05:09:08
summary:     na->np
affected #:  1 file

diff -r ede4ebb071e5d17d10fadbe546191c17336c6eb8 -r 533c263cacf791e49e3afb53ea54402595fb3270 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -174,7 +174,7 @@
     dd /= data.pf.conversion_factors["Density"]
     tr = dg/dd*data.pf.tr
     #ghost cells have zero density?
-    tr[na.isnan(tr)] = 0.0
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
     #if data.id==460:
     #tr[di] = -1.0 #replace the zero-density points with zero temp
@@ -247,11 +247,11 @@
 #Derived particle fields
 
 def mass_dm(field, data):
-    tr = na.ones(data.ActiveDimensions, dtype='float32')
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    if na.sum(idx)>0:
+    if np.sum(idx)>0:
         tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
         tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
         print tr.shape
@@ -265,7 +265,7 @@
         projection_conversion="1")
 
 def _spdensity(field, data):
-    grid_mass = na.zeros(data.ActiveDimensions, dtype='float32')
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.star_mass.shape[0] ==0 : return grid_mass 
     amr_utils.CICDeposit_3(data.star_position_x,
                            data.star_position_y,
@@ -273,16 +273,16 @@
                            data.star_mass.astype('float32'),
                            data.star_mass.shape[0],
                            grid_mass, 
-                           na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return grid_mass 
 
 #add_field("star_density", function=_spdensity,
 #          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _simple_density(field,data):
-    mass = na.sum(data.star_mass)
+    mass = np.sum(data.star_mass)
     volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
     return mass/volume
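
_spdensity deposits star mass onto the grid with amr_utils.CICDeposit_3, while _simple_density just divides total mass by volume. As a rough, self-contained stand-in for the deposition step, here is a nearest-grid-point version built on np.histogramdd; the grid shape, positions, and masses are all made up:

    import numpy as np

    np.random.seed(0)
    dims = (4, 4, 4)                               # hypothetical grid shape
    dx = 0.25                                      # cell width for a unit box
    pos = np.random.uniform(0.0, 1.0, (100, 3))    # hypothetical star positions
    mass = np.ones(100)                            # hypothetical star masses

    edges = [np.linspace(0.0, 1.0, n + 1) for n in dims]
    grid_mass, _ = np.histogramdd(pos, bins=edges, weights=mass)
    star_density = grid_mass / dx**3               # mass per cell volume
    print(star_density.shape)                      # -> (4, 4, 4)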
 



https://bitbucket.org/yt_analysis/yt/changeset/7963775e8a8a/
changeset:   7963775e8a8a
branch:      yt
user:        Christopher Moody
date:        2012-10-19 05:29:34
summary:     the total metal density should be a sum of SNIa and SNII
affected #:  1 file

diff -r 533c263cacf791e49e3afb53ea54402595fb3270 -r 7963775e8a8a55718f4a81fb08e0d89cf1df51f0 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -235,7 +235,7 @@
 
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
-    tr = data["MetalDensitySNII"]
+    tr += data["MetalDensitySNII"]
     return tr
 add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Metal_Density"]._units = r""



https://bitbucket.org/yt_analysis/yt/changeset/c4721d6438db/
changeset:   c4721d6438db
branch:      yt
user:        Christopher Moody
date:        2012-10-23 09:30:19
summary:     re-enabled stars
affected #:  1 file

diff -r 7963775e8a8a55718f4a81fb08e0d89cf1df51f0 -r c4721d6438dbf48acbb885badeb932316cf85f48 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -570,7 +570,7 @@
             do_stars = (self.pf.only_particle_type is None) or \
                        (self.pf.only_particle_type == -1) or \
                        (self.pf.only_particle_type == len(lspecies))
-            self.pf.do_stars = False
+            self.pf.do_stars = do_stars 
             if self.pf.file_star_data and do_stars: 
                 nstars_pa = nstars
                 (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \



https://bitbucket.org/yt_analysis/yt/changeset/3d857fa07ccd/
changeset:   3d857fa07ccd
branch:      yt
user:        Christopher Moody
date:        2012-10-23 09:30:53
summary:     fixed star access in IO
affected #:  1 file

diff -r c4721d6438dbf48acbb885badeb932316cf85f48 -r 3d857fa07ccd00d36a6d5dd1dd61af7243563b97 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -150,7 +150,7 @@
         psi = grid.pf.particle_star_index
         if field not in field_dict.keys() and starfield in field_dict.keys():
             particle_field = np.zeros(grid.particle_mass.shape)                    
-            particle_field[grid.particle_id==psi]=field_dict[starfield]
+            particle_field[grid.particle_type==psi]=field_dict[starfield]
             return particle_field
         else:
             return field_dict[field]



https://bitbucket.org/yt_analysis/yt/changeset/149daa338564/
changeset:   149daa338564
branch:      yt
user:        Christopher Moody
date:        2012-10-23 09:31:57
summary:     Now imposes a super-oct tree structure when exporting more than a single root cell across; cleaned up old comments
affected #:  1 file

diff -r 3d857fa07ccd00d36a6d5dd1dd61af7243563b97 -r 149daa3385640c99c73a2f712ad5790269a92cc7 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -75,7 +75,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -192,16 +191,22 @@
     return domains_list
 
 def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -249,6 +254,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -257,7 +263,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -269,6 +275,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -286,6 +293,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -294,13 +302,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
     if l<2:
-        print txt%((l,)+tuple(fle)+tuple(fre))
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +318,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,9 +326,10 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
     is_leaf = (child_grid_index==-1) and (level>0)
     refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #True is oct, False is leaf
     pos.refined_pos+= 1 
     if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
@@ -340,13 +351,16 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + na.array(vertex)*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
 
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                subgrid,hilbert_child,output,refined,levels,grids,level+1,
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
                 debug=debug,tracker=tracker)
 
 
@@ -362,8 +376,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -524,8 +536,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -540,13 +551,10 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -565,10 +573,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -585,9 +591,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
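
RecurseOctreeDepthFirstHilbert walks the oct tree depth-first, recording for every visited node whether it is refined (an oct) or a leaf, plus its level and, with this change, its grid id, while only leaves contribute hydro data. A drastically simplified, Hilbert-free sketch of that traversal shape -- Node and the sample tree are hypothetical, and a real octree has eight children per oct:

    class Node(object):
        def __init__(self, value, children=None):
            self.value = value
            self.children = children or []   # 0 or 8 children in a real octree

    def recurse_depth_first(node, refined, levels, output, level=0):
        is_leaf = len(node.children) == 0
        refined.append(not is_leaf)          # True is oct, False is leaf
        levels.append(level)
        if is_leaf:
            output.append(node.value)        # only leaves carry hydro data
            return
        for child in node.children:          # a Hilbert ordering would permute this
            recurse_depth_first(child, refined, levels, output, level + 1)

    root = Node(None, [Node(1.0), Node(None, [Node(2.0), Node(3.0)]), Node(4.0)])
    refined, levels, output = [], [], []
    recurse_depth_first(root, refined, levels, output)
    print(refined)   # -> [True, False, True, False, False, False]
    print(output)    # -> [1.0, 2.0, 3.0, 4.0]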



https://bitbucket.org/yt_analysis/yt/changeset/0598f1fd62de/
changeset:   0598f1fd62de
branch:      yt
user:        Christopher Moody
date:        2012-11-01 01:46:10
summary:     Velocity field functions were missing the (field, data) argument signature
affected #:  1 file

diff -r 149daa3385640c99c73a2f712ad5790269a92cc7 -r 0598f1fd62de714e524f131ffbd24cd016cb181a yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -211,21 +211,21 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
@@ -244,6 +244,11 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
 def mass_dm(field, data):
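
yt derived-field functions are called with (field, data); the old _x_velocity(data) signature is what this changeset corrects. A standalone illustration of the corrected signature, with a plain dict standing in for the yt data object purely for demonstration:

    import numpy as np

    def _x_velocity(field, data):
        # derived fields always receive (field, data), even if "field" is unused
        return data["XMomentumDensity"] / data["Density"]

    fake_data = {"XMomentumDensity": np.array([2.0, 4.0]),
                 "Density": np.array([1.0, 2.0])}
    print(_x_velocity(None, fake_data))   # -> [ 2.  2.]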



https://bitbucket.org/yt_analysis/yt/changeset/8cd61c83df97/
changeset:   8cd61c83df97
branch:      yt
user:        Christopher Moody
date:        2012-11-07 23:06:06
summary:     added basic particle fields
affected #:  1 file

diff -r 0598f1fd62de714e524f131ffbd24cd016cb181a -r 8cd61c83df970c40509819853f49bda2dfb78d91 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -60,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -67,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -232,7 +232,6 @@
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,6 +250,17 @@
 
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
     tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
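
The new _creation_time field keeps positive particle ages and marks everything else with a -1.0 sentinel. The same masking in isolation, on made-up values:

    import numpy as np

    particle_age = np.array([-1.0, 3.2e15, -1.0, 1.1e16])   # hypothetical ages (s)
    creation_time = np.zeros(particle_age.shape, dtype='float') - 1.0
    creation_time[particle_age > 0] = particle_age[particle_age > 0]
    print(creation_time)   # -> [ -1.00e+00   3.20e+15  -1.00e+00   1.10e+16]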



https://bitbucket.org/yt_analysis/yt/changeset/6f3d3274c898/
changeset:   6f3d3274c898
branch:      yt
user:        Christopher Moody
date:        2012-11-07 23:07:14
summary:     can now export across multiple cells
removed camera-position code; incorrect math went into it
affected #:  1 file

diff -r 8cd61c83df970c40509819853f49bda2dfb78d91 -r 6f3d3274c89811485f3af55179c9fc0b39e371f8 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,9 +33,6 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
@@ -471,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -680,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
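
The new export check requires the super-oct width, measured in root cells, to be a power of two; the added line tests this with a log2 round-trip. The same test in isolation:

    import numpy as np

    def is_power_of_two(n):
        # same tolerance-based log2 test as the added "nwide" check
        return abs(np.log2(n) - np.rint(np.log2(n))) < 1e-5

    print([n for n in (1, 2, 3, 4, 6, 8, 12, 16) if is_power_of_two(n)])
    # -> [1, 2, 4, 8, 16]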



https://bitbucket.org/yt_analysis/yt/changeset/f41ffc7702a0/
changeset:   f41ffc7702a0
branch:      yt
user:        Christopher Moody
date:        2012-11-07 23:08:18
summary:     total particles actually set
affected #:  1 file

diff -r 6f3d3274c89811485f3af55179c9fc0b39e371f8 -r f41ffc7702a092bda67920e238635329f25a13fd yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -46,7 +46,7 @@
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1):
+            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -78,6 +78,9 @@
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
+        force_res: None
+            The default force resolution is 0.0012 comoving Mpc/h.
+            This overrides Rockstar's default.
 
         Returns
         -------
@@ -113,10 +116,10 @@
         if self.comm.size > 1: 
             self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field,data):
-            return (data["particle_type"]==0).sum()
-        add_field("particle_count",function=_particle_count,particle_type=True)
-        self.total_particles = -1
+        dd = tpf.h.all_data()
+        total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
+        mylog.info("Found %i halo particles",total_particles)
+        self.total_particles = total_particles
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
@@ -134,6 +137,7 @@
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         data_source = tpf.h.all_data()
         self.comm.barrier()
+        self.force_res = force_res
         self.handler = rockstar_interface.RockstarInterface(
                 self.ts, data_source)
 
@@ -170,14 +174,14 @@
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), #self.total_particles, 
-                    self.dm_type,
+                    len(self.ts), self.total_particles, self.dm_type,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
+                    force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
         #because rockstar *always* write to exactly the same
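
With this change the finder counts the dark-matter particles up front and also accepts a force_res override. A hedged usage sketch based only on the constructor arguments visible in the diff; the import path follows the file's location, "ts" is assumed to be an already-built yt TimeSeriesData, and the run() call is assumed to be the usual entry point of this class:

    from yt.analysis_modules.halo_finding.rockstar.rockstar import RockstarHaloFinder

    # force_res is the optional override added by this changeset (comoving Mpc/h)
    rh = RockstarHaloFinder(ts, num_readers=1, num_writers=1,
                            dm_type=1, particle_mass=-1.0,
                            force_res=0.0012)
    rh.run()   # assumed entry point; kwargs are forwarded to setup_rockstar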



https://bitbucket.org/yt_analysis/yt/changeset/b4617fbc3b18/
changeset:   b4617fbc3b18
branch:      yt
user:        Christopher Moody
date:        2012-11-07 23:09:16
summary:     can now set the force resolution
affected #:  1 file

diff -r f41ffc7702a092bda67920e238635329f25a13fd -r b4617fbc3b189ff172593a6e5b62c0487f91f46a yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -310,20 +310,23 @@
         self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
-                       int num_snaps, #np.int64_t total_particles,
+                       int num_snaps, np.int64_t total_particles,
                        int dm_type,
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, 
+                       int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH
+        global OVERLAP_LENGTH, FORCE_RES
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
@@ -343,7 +346,7 @@
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
-        #TOTAL_PARTICLES = total_particles
+        TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
         
         tpf = self.ts[0]



https://bitbucket.org/yt_analysis/yt/changeset/abc2a575b177/
changeset:   abc2a575b177
branch:      yt
user:        Christopher Moody
date:        2012-11-07 23:20:15
summary:     added particle count field
affected #:  1 file

diff -r b4617fbc3b189ff172593a6e5b62c0487f91f46a -r abc2a575b1777742cc0782ed93ef55b34b9fc6f2 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -117,9 +117,6 @@
             self.comm.barrier()            
         tpf = ts.__iter__().next()
         dd = tpf.h.all_data()
-        total_particles = na.sum(dd['particle_type']==dm_type).astype('int64')
-        mylog.info("Found %i halo particles",total_particles)
-        self.total_particles = total_particles
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
@@ -138,6 +135,12 @@
         data_source = tpf.h.all_data()
         self.comm.barrier()
         self.force_res = force_res
+        def _pcount(field,data):
+            return (data["particle_type"]==dm_type).sum()
+        add_field("pcount",function=_pcount,particle_type=True)
+        total_particles = dd.quantities['TotalQuantity']('pcount')
+        self.total_particles = total_particles
+        mylog.info("Found %i halo particles",total_particles)
         self.handler = rockstar_interface.RockstarInterface(
                 self.ts, data_source)
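
A standalone sketch of the same pattern (a derived field reduced through the
TotalQuantity derived quantity), with an illustrative field name and dataset path:

# Count dark-matter particles via a derived field; TotalQuantity then sums the
# per-grid counts (and works in parallel).  dm_type=1 follows the Enzo
# convention used elsewhere in this module.
from yt.mods import *

dm_type = 1

def _dm_count(field, data):
    # number of DM particles in this chunk of data
    return (data["particle_type"] == dm_type).sum()

add_field("dm_count", function=_dm_count, particle_type=True)

pf = load("Enzo_64/DD0043/data0043")      # placeholder dataset
dd = pf.h.all_data()
total = dd.quantities["TotalQuantity"]("dm_count")[0]
print "Found %i DM particles" % total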
 



https://bitbucket.org/yt_analysis/yt/changeset/b25bf1498a9a/
changeset:   b25bf1498a9a
branch:      yt
user:        juxtaposicion
date:        2012-11-26 20:06:14
summary:     Merge
affected #:  9 files

diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,141 +26,23 @@
 from yt.mods import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
+
 from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-from yt.config import ytcfg
-
 import rockstar_interface
-
 import socket
 import time
-import threading
-import signal
-import os
-from os import environ
-from os import mkdir
-from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
 
-class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
-        # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
-    def run(self, handler, pool):
-        # If inline, we use forks.
-        server_pid = 0
-        # Start a server on only one machine/fork.
-        if pool.comm.rank == 0:
-            server_pid = os.fork()
-            if server_pid == 0:
-                handler.start_server()
-                os._exit(0)
-        # Start writers.
-        writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        # Make sure the forks are done, which they should be.
-        if writer_pid != 0:
-            os.waitpid(writer_pid, 0)
-        if server_pid != 0:
-            os.waitpid(server_pid, 0)
-
-class StandardRunner(ParallelAnalysisInterface):
-    def __init__(self, num_readers, num_writers):
-        self.num_readers = num_readers
-        if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
-            mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
-            raise RuntimeError
-    
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
-    
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
@@ -183,30 +65,23 @@
             The number of reader can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
+            IO-limited. Default is 1.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
+            The default is set to comm.size-num_readers-1.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
+            placed. Default is str(pf)+'_rockstar'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-            
+        force_res: float, optional
+            The default force resolution is 0.0012 comoving Mpc/h;
+            passing a value here overrides Rockstar's default.
+
         Returns
         -------
         None
@@ -218,6 +93,7 @@
 
         test_rockstar.py:
 
+        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
@@ -229,64 +105,50 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        # Decide how we're working.
-        if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
-        else:
-            self.runner = StandardRunner(num_readers, num_writers)
-        self.num_readers = self.runner.num_readers
-        self.num_writers = self.runner.num_writers
-        mylog.info("Rockstar is using %d readers and %d writers",
-            self.num_readers, self.num_writers)
-        # Note that Rockstar does not support subvolumes.
-        # We assume that all of the snapshots in the time series
-        # use the same domain info as the first snapshots.
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshots
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field, data):
-            try:
-                return (data["particle_type"]==dm_type).sum()
-            except KeyError:
-                return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            #we need readers+writers+1 server = comm size        
+            raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
+        data_source = tpf.h.all_data()
+        self.comm.barrier()
+        self.force_res = force_res
+        def _pcount(field,data):
+            return (data["particle_type"]==dm_type).sum()
+        add_field("pcount",function=_pcount,particle_type=True)
+        total_particles = dd.quantities['TotalQuantity']('pcount')
+        self.total_particles = total_particles
+        mylog.info("Found %i halo particles",total_particles)
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+                self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.size == 1 or self.workgroup.name == "server":
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +156,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -302,13 +164,21 @@
         """
         
         """
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            mylog.debug("Num Writers = %s Num Readers = %s",
+                        self.num_writers, self.num_readers)
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(self.num_readers, name = "readers")
+            self.pool.add_workgroup(self.num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, 
-                    self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    len(self.ts), self.total_particles, self.dm_type,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
@@ -317,29 +187,27 @@
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename, we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten
+        if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-            # Make a record of which dataset corresponds to which set of
-            # output files because it will be easy to lose this connection.
-            fp = open(self.outbase + '/pfs.txt', 'w')
-            fp.write("# pfname\tindex\n")
-            for i, pf in enumerate(self.ts):
-                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
-                line = "%s\t%d\n" % (pfloc, i)
-                fp.write(line)
-            fp.close()
-        # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
-            # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            self.pool.free_all()
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
@@ -347,4 +215,5 @@
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+        tpf = self.ts[0]
+        return RockstarHaloList(tpf,file_name)
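
The merge replaces the InlineRunner/StandardRunner machinery with an explicit
ProcessorPool split into one server, num_readers readers and num_writers
writers.  A sketch of that layout (the sizes are illustrative and assume
comm.size == readers + writers + 1):

# One rank serves, num_readers ranks read particles, num_writers ranks build
# and write halos; each rank finds the workgroup it belongs to and run()
# dispatches on the workgroup name ("server" -> start_server(), otherwise
# start_client() after a short staggered sleep).
from yt.utilities.parallel_tools.parallel_analysis_interface import ProcessorPool

num_readers, num_writers = 1, 2          # illustrative; needs 4 MPI ranks total

pool = ProcessorPool()
pool.add_workgroup(1, name="server")
pool.add_workgroup(num_readers, name="readers")
pool.add_workgroup(num_writers, name="writers")

for wg in pool.workgroups:
    if pool.comm.rank in wg.ranks:
        my_workgroup = wg                # this rank's role in the pool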


diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,8 +29,6 @@
 cimport cython
 from libc.stdlib cimport malloc
 
-from yt.config import ytcfg
-
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -46,11 +44,11 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h" nogil:
+cdef import from "server.h":
     int server()
 
-cdef import from "client.h" nogil:
-    void client(np.int64_t in_type)
+cdef import from "client.h":
+    void client()
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
@@ -239,54 +237,26 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
-    global SCALE_NOW
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    global SCALE_NOW, TOTAL_PARTICLES
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    
+
+    # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-
-    all_grids = pf.h.grids
+    dd = pf.h.all_data()
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
-    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
-
-    # First we need to find out how many this reader is going to read in
-    # if the number of readers > 1.
-    if NUM_BLOCKS > 1:
-        local_parts = 0
-        for g in grids:
-            if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-                iddm = g["particle_type"] == rh.dm_type
-            else:
-                iddm = Ellipsis
-            arri = g["particle_index"].astype("int64")
-            arri = arri[iddm] #pick only DM
-            local_parts += arri.size
-    else:
-        local_parts = TOTAL_PARTICLES
-
-    #print "local_parts", local_parts
-
-    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
-
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
@@ -295,12 +265,8 @@
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
-        arri = g["particle_index"].astype("int64")
+        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
@@ -310,13 +276,22 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = g[field].astype("float64")
+            arr = dd._get_data_from_grid(g, field).astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = local_parts
+    num_p[0] = tnpart
+    TOTAL_PARTICLES = tnpart
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
 
 cdef class RockstarInterface:
 
@@ -348,10 +323,10 @@
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        global OVERLAP_LENGTH, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            #print "set force res to ",FORCE_RES
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
@@ -392,7 +367,6 @@
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
-        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
@@ -402,9 +376,7 @@
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        with nogil:
-            server()
+        server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
-        client(in_type)
+    def start_client(self):
+        client()
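
The reworked rh_read_particles() takes its block number from the particle
filename suffix and grabs one slice of the grid list with np.array_split.
The decomposition in isolation (the grid list here is a stand-in for
dd._grids):

import numpy as np

NUM_BLOCKS = 4                 # number of reader blocks (illustrative)
grids = np.arange(10)          # stand-in for the dataset's grid objects

# in the diff the block index comes from the filename:
#   block = int(str(filename).rsplit(".")[-1])
for block in range(NUM_BLOCKS):
    my_grids = np.array_split(grids, NUM_BLOCKS)[block]
    print "reader %i gets grids %s" % (block, my_grids)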


diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #-1 for a leaf, else the child grid index
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert na.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,na.sum(idx)
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
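
prepare_octree() now registers TemperatureTimesCellMassMsun locally, so the
exporter can recover a mass-weighted temperature as sum(T*M)/sum(M).  A sketch
of that derived-field pattern (the dataset path is a placeholder):

from yt.mods import *

def _temp_times_mass(field, data):
    return data["Temperature"] * data["CellMassMsun"]

add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)

pf = load("galaxy0030/galaxy0030")        # placeholder dataset
dd = pf.h.all_data()
t_mw = dd["TemperatureTimesCellMassMsun"].sum() / dd["CellMassMsun"].sum()
print "mass-weighted temperature = %e" % t_mw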




diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Erick Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -42,24 +44,24 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+import yt.frontends.ramses._ramses_reader as _ramses_reader #do not fail silently;
 
 from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, sec_per_Gyr
-
+    mass_hydrogen_cgs
+    
 from yt.frontends.art.definitions import art_particle_field_names
 
 from yt.frontends.art.io import _read_child_mask_level
 from yt.frontends.art.io import read_particles
 from yt.frontends.art.io import read_stars
+from yt.frontends.art.io import spread_ages
 from yt.frontends.art.io import _count_art_octs
 from yt.frontends.art.io import _read_art_level_info
 from yt.frontends.art.io import _read_art_child
@@ -81,19 +83,43 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        self.particle_type = np.array([])
+        self.particle_id= np.array([])
+        self.particle_age= np.array([])
+        self.particle_position_x = np.array([])
+        self.particle_position_y = np.array([])
+        self.particle_position_z = np.array([])
+        self.particle_velocity_x = np.array([])
+        self.particle_velocity_y = np.array([])
+        self.particle_velocity_z = np.array([])
+        self.particle_mass= np.array([])
+        self.star_position_x = np.array([])
+        self.star_position_y = np.array([])
+        self.star_position_z = np.array([])
+        self.star_velocity_x = np.array([])
+        self.star_velocity_y = np.array([])
+        self.star_velocity_z = np.array([])
+        self.star_age = np.array([])
+        self.star_metallicity1 = np.array([])
+        self.star_metallicity2 = np.array([])
+        self.star_mass_initial = np.array([])
+        self.star_mass = np.array([])
+
         #if child_mask is not None:
         #    self._set_child_mask(child_mask)
 
@@ -109,7 +135,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -138,13 +165,105 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
+        if 'particle_position' in dir(self.pf):
+            self._setup_particle_grids()
         self._setup_field_list()
         
+    def _setup_particle_grids(self):
+        grid_particle_count = np.zeros(len(self.grids),dtype='int64')
+        npt = self.pf.particle_position.shape[0]
+        if self.pf.do_grid_particles:
+            nps = self.pf.star_position.shape[0]
+            grid_indices = np.zeros(nps,dtype='int64')
+            particle_id= np.arange(nps,dtype='int64')
+            pbar = get_pbar("Gridding Particles",len(self.grids))
+            grid_indices,grid_particle_count,grids_done = \
+                    particle_assignment(self.grids,
+                      self.grids[0], 
+                      self.pf.star_position,
+                      particle_id,
+                      grid_indices,
+                      grid_particle_count, 
+                      self.pf.domain_dimensions,
+                      self.pf.max_level,
+                      logger=pbar)
+            pbar.finish()        
+            pbar = get_pbar("Finalizing grids ",len(self.grids))
+            for gi, (g,npi) in enumerate(zip(self.grids,grid_particle_count)): 
+                star_mask= grid_indices==gi
+                if gi==0:
+                    #attach all the particles to the root grid
+                    g.particle_type = self.pf.particle_type
+                    g.particle_id = np.arange(npt)
+                    g.particle_mass = self.pf.particle_mass
+                    g.particle_mass_initial = self.pf.particle_mass_initial
+                    g.particle_age = self.pf.particle_age
+                    g.particle_metallicity= self.pf.particle_metallicity
+                    g.particle_position_x= self.pf.particle_position[:,0]
+                    g.particle_position_y= self.pf.particle_position[:,1]
+                    g.particle_position_z= self.pf.particle_position[:,2]
+                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
+                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
+                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
+                if star_mask.sum()>0:
+                    star_data = self.pf.star_data[star_mask]         
+                    (g.star_position_x, \
+                        g.star_position_y, \
+                        g.star_position_z, \
+                        g.star_velocity_x,\
+                        g.star_velocity_y,\
+                        g.star_velocity_z,\
+                        g.star_age,\
+                        g.star_metallicity1,\
+                        g.star_metallicity2,\
+                        g.star_mass_initial,\
+                        g.star_mass) = tuple(star_data.T)
+                    g.NumberOfParticles = npi        
+                self.grids[gi] = g
+                pbar.update(gi)
+            pbar.finish()
+        else:        
+            pbar = get_pbar("Finalizing grids ",len(self.grids))
+            for gi, g in enumerate(self.grids): 
+                if gi==0:
+                    #attach all the particles to the root grid
+                    g.particle_type = self.pf.particle_type
+                    g.particle_id = np.arange(npt)
+                    g.particle_mass = self.pf.particle_mass
+                    g.particle_mass_initial = self.pf.particle_mass_initial
+                    g.particle_age = self.pf.particle_age
+                    g.particle_metallicity= self.pf.particle_metallicity
+                    g.particle_position_x= self.pf.particle_position[:,0]
+                    g.particle_position_y= self.pf.particle_position[:,1]
+                    g.particle_position_z= self.pf.particle_position[:,2]
+                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
+                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
+                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
+                    if self.pf.do_stars:
+                        (g.star_position_x, \
+                            g.star_position_y, \
+                            g.star_position_z, \
+                            g.star_velocity_x,\
+                            g.star_velocity_y,\
+                            g.star_velocity_z,\
+                            g.star_age,\
+                            g.star_metallicity1,\
+                            g.star_metallicity2,\
+                            g.star_mass_initial,\
+                            g.star_mass) = tuple(self.pf.star_data.T)
+                    g.NumberOfParticles = npt        
+                else:
+                    g.star_indices = []
+                self.grids[gi] = g
+            pbar.finish()
+            grid_particle_count[0]=npt
+        self.grid_particle_count = grid_particle_count
+
     def _initialize_data_storage(self):
         pass
 
@@ -209,11 +328,18 @@
             if level > self.pf.limit_level : continue
             
             #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                    self.pf.level_oct_offsets,level,
+                    coarse_grid=self.pf.domain_dimensions[0])
+            if level>1:
+                assert root_level == last_root_level
+            last_root_level = root_level
+                    
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
             #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
+            idc, art_child_mask = _read_child_mask_level(f, 
+                    self.pf.level_child_offsets,
                 level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
             art_child_mask = art_child_mask.reshape((nocts,2,2,2))
             self.pf.level_art_child_masks[level]=art_child_mask
@@ -307,7 +433,7 @@
                         eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
             
         
-            mylog.debug("Done with level % 2i", level)
+            mylog.info("Done with level % 2i; max LE %i", level,np.max(left_index))
             pbar.finish()
             self.proto_grids.append(psgs)
             #print sum(len(psg.grid_file_locations) for psg in psgs)
@@ -322,35 +448,33 @@
 
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
         
@@ -359,7 +483,7 @@
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
+            nstars = np.diff(lspecies)[-1]
             a = self.pf.parameters['aexpn']
             hubble = self.pf.parameters['hubble']
             ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
@@ -368,7 +492,7 @@
             um *= 1.989e33 #convert solar masses to grams 
             pbar = get_pbar("Loading Particles   ",5)
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
+                read_particles(self.pf.file_particle_data,Nrow)
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
@@ -378,27 +502,33 @@
                 if type(self.pf.only_particle_type)==type(5):
                     npa = clspecies[self.pf.only_particle_type]
                     npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
+            nparticles = npb-npa
+            npt = nparticles
+            #make sure we aren't going to throw out good particles
+            if not np.all(self.pf.particle_position[npb:]==0.0):
+                print 'WARNING: unused particles discovered from lspecies'
             self.pf.particle_position   = self.pf.particle_position[npa:npb]
             #do NOT correct by an offset of 1.0
             #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            #to unitary units (comoving)
             pbar.update(3)
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
+            self.pf.particle_type         = np.zeros(nparticles,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(nparticles,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity  = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(nparticles,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(nparticles,dtype='float64')-1
+
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
@@ -411,10 +541,12 @@
             self.pf.conversion_factors['particle_index']=1.0
             self.pf.conversion_factors['particle_type']=1
             self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            self.pf.conversion_factors['Msun'] = 5.027e-34 
+            #conversion to solar mass units
             
 
             a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 if type(self.pf.only_particle_type)==type(5):
                     if not i==self.pf.only_particle_type:
@@ -425,80 +557,82 @@
                 else:
                     self.pf.particle_type[a:b] = i #particle type
                     self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                    if m==0.0:
+                        self.pf.particle_star_index = i
                 a=b
             pbar.finish()
 
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
+            lparticles = [0,]+list(lspecies)
+            for j,npi in enumerate(lparticles):
+                mylog.debug('found %i of particle type %i'%(npi,j))
             
-            self.pf.particle_star_index = i
             
             do_stars = (self.pf.only_particle_type is None) or \
                        (self.pf.only_particle_type == -1) or \
                        (self.pf.only_particle_type == len(lspecies))
+            self.pf.do_stars = do_stars 
             if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+                nstars_pa = nstars
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_star_data)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = nstars_pa
+                if not nstars_rs==np.sum(self.pf.particle_type==self.pf.particle_star_index):
+                    print 'WARNING!: nstars is inconsistent!'
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
+                    birthtimes= \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread
+                    if spread == False:
+                        pass
+                    elif type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    else:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index    
+                    assert np.sum(idx)==nstars_pa
+                    self.pf.star_position = self.pf.particle_position[idx]
+                    self.pf.star_velocity = self.pf.particle_velocity[idx]
+                    self.pf.particle_age[idx] = ages
+                    self.pf.star_age = ages
                     pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
+                    self.pf.particle_metallicity[idx] = metallicity1+metallicity2
+                    self.pf.particle_metallicity1[idx] = metallicity1
+                    self.pf.particle_metallicity2[idx] = metallicity2
+                    self.pf.particle_mass[idx] = mass*um
+                    self.pf.particle_mass_initial[idx] = mass*um
+                    self.pf.star_metallicity1 = metallicity1
+                    self.pf.star_metallicity2 = metallicity2
+                    self.pf.star_mass_initial = imass*um
+                    self.pf.star_mass = mass*um
+                    self.pf.star_data = np.array([
+                        self.pf.star_position[:,0],
+                        self.pf.star_position[:,1],
+                        self.pf.star_position[:,2],
+                        self.pf.star_velocity[:,0],
+                        self.pf.star_velocity[:,1],
+                        self.pf.star_velocity[:,2],
+                        self.pf.star_age,
+                        self.pf.star_metallicity1,
+                        self.pf.star_metallicity2,
+                        self.pf.star_mass_initial,
+                        self.pf.star_mass]).T
 
             done = 0
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+            #pos = np.vstack((na.arange(pos.shape[0]),pos.T)).T 
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,48 +641,36 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
     def _setup_field_list(self):
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
@@ -588,10 +710,11 @@
                  file_particle_data=None,
                  file_star_data=None,
                  discover_particles=True,
-                 use_particles=True,
                  limit_level=None,
                  only_particle_type = None,
-                 grid_particles=False,
+                 do_grid_particles=False,
+                 merge_dm_and_stars=False,
+                 spread = True,
                  single_particle_mass=False,
                  single_particle_type=0):
         
@@ -605,8 +728,10 @@
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
         self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
+        self.do_grid_particles = do_grid_particles
         self.single_particle_mass = single_particle_mass
+        self.merge_dm_and_stars = merge_dm_and_stars
+        self.spread = spread
         
         if limit_level is None:
             self.limit_level = np.inf
@@ -719,8 +844,11 @@
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
+
 
         #we were already in seconds, go back in to code units
         #self.current_time /= self.t0 
@@ -817,7 +945,7 @@
         # integrand_arr = integrand(spacings)
         # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
@@ -865,7 +993,8 @@
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
         root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctChfull = _read_frecord(f,'>i')
+        self.root_iOctCh = self.root_iOctChfull[:root_cells]
         self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
@@ -927,10 +1056,12 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        self.parameters['wspeciesf'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspeciesf'] = np.fromfile(fh,dtype='>i',count=10)
+        assert np.all(self.parameters['lspeciesf'][n:]==0.0)
+        assert np.all(self.parameters['wspeciesf'][n:]==0.0)
+        self.parameters['wspecies'] = self.parameters['wspeciesf'][:n]
+        self.parameters['lspecies'] = self.parameters['lspeciesf'][:n]
         fh.close()
         
         ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
@@ -940,14 +1071,64 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        """
-        Defined for Daniel Ceverino's file naming scheme.
-        This could differ for other formats.
-        """
-        fn = ("%s" % (os.path.basename(args[0])))
-        f = ("%s" % args[0])
-        if fn.endswith(".d") and fn.startswith('10Mpc') and\
-                os.path.exists(f): 
-                return True
+        if "10MpcBox" in args[0]:
+            return True
         return False
 
+def particle_assignment(grids,this_grid, 
+                                  pos,
+                                  particle_id,
+                                  grid_indices,
+                                  grid_particle_count, 
+                                  domain_dimensions,
+                                  max_level,
+                                  subdiv=2,
+                                  grids_done=0,
+                                  logger=None):
+    #for every particle check every child grid to see if it fits inside
+    #cast the pos -> cell location index (instead of doing a LE<pos<RE check)
+    #find if cell descends into the next mesh
+    
+    #cast every position into a cell on this grid
+    #we may get negative indices or indices outside this grid
+    #mask them out
+    exp = domain_dimensions*subdiv**this_grid.Level
+    lei= np.floor((pos-this_grid.LeftEdge)*exp).astype('int64')
+
+    #now lookup these indices in the child index mask
+    #throw out child grids = -1 and particles outside the range
+    #default state is to not grid a particle
+    child_idx = np.zeros(lei.shape[0],dtype='int64')-1
+    #remove particles to the left or right of the grid
+    lei_out  = np.any(lei>=this_grid.ActiveDimensions,axis=1)
+    lei_out |= np.any(lei<0,axis=1)
+    #lookup grids for every particle except the ones outside this grid
+    leio=lei[~lei_out]
+    #child_idx[~lei_out]= \
+    child_idx[~lei_out]= \
+            this_grid.child_index_mask[(leio[:,0],leio[:,1],leio[:,2])]
+    mask = (child_idx > -1)
+    #only assign the particles if they point to a grid ID that isnt -1
+    grid_indices[particle_id[mask]] = child_idx[mask]
+    #the number of particles on this grid is equal to those
+    #that point to -1
+    grid_particle_count[this_grid.id] = np.sum(~mask)
+    grids_done +=1
+    if logger:
+        logger.update(grids_done)
+
+    for child_grid_index in np.unique(this_grid.child_index_mask):
+        if child_grid_index == -1: 
+            continue
+        if grids[child_grid_index].Level == max_level:
+            continue
+        mask = child_idx == child_grid_index
+        if np.sum(mask)==0:continue
+        grid_indices,grid_particle_count,grids_done = \
+        particle_assignment(grids,grids[child_grid_index],
+                pos[mask],particle_id[mask],
+                grid_indices,grid_particle_count,
+                domain_dimensions,max_level,grids_done=grids_done,
+                subdiv=subdiv,logger=logger)
+    return grid_indices,grid_particle_count,grids_done
+

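The recursive particle_assignment helper added above works by casting particle positions straight to cell indices on a grid, then handing any particle that lands in a refined cell down to the child grid that owns it. Below is a minimal, dataset-free sketch of just the index-casting step; every value is a stand-in, not something read from a real ART output.

    import numpy as np

    # Stand-in values only; not read from a real ART output.
    domain_dimensions = np.array([128, 128, 128])
    level = 2                                   # this grid's refinement level
    subdiv = 2                                  # cells double per level
    left_edge = np.array([0.25, 0.25, 0.25])    # unitary coordinates
    active_dims = np.array([64, 64, 64])

    # Particle positions in unitary [0, 1) coordinates, as in the reader above.
    pos = np.random.random((1000, 3)) * 0.2 + 0.25

    # Cast every position into a cell index on this grid.
    exp = domain_dimensions * subdiv ** level
    lei = np.floor((pos - left_edge) * exp).astype('int64')

    # Mask out particles whose index falls outside the grid's active region.
    outside = np.any(lei >= active_dims, axis=1) | np.any(lei < 0, axis=1)
    print '%i of %i particles land on this grid' % ((~outside).sum(), len(pos))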

diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,13 +26,10 @@
 """
 
 art_particle_field_names = [
-'particle_age',
 'particle_index',
 'particle_mass',
 'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
+'particle_age',
 'particle_metallicity',
 'particle_position_x',
 'particle_position_y',
@@ -40,4 +37,18 @@
 'particle_velocity_x',
 'particle_velocity_y',
 'particle_velocity_z',
-'particle_type']
+'particle_type',
+'star_position_x',
+'star_position_y',
+'star_position_z',
+'star_velocity_x',
+'star_velocity_y',
+'star_velocity_z',
+'star_age',
+'star_mass',
+'star_mass_initial',
+'star_creation_time',
+'star_metallicity1',
+'star_metallicity2',
+'star_metallicity',
+]


diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)

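The particle_cell_mass_dm field above spreads the mass of the selected dark-matter particles over the grid being queried, per the "dumb assumption" comment that the mass is evenly distributed. A dataset-free sketch of that even-split idea follows; the grid shape, masses, and type labels are invented, and the unit conversions done in the real field are left out.

    import numpy as np

    active_dims = (8, 8, 8)                       # stand-in grid shape
    particle_mass = np.random.random(500) * 1e8   # invented masses (Msun)
    particle_type = np.random.randint(0, 6, 500)  # invented species labels

    tr = np.ones(active_dims, dtype='float32')
    idx = particle_type < 5                       # keep only dark-matter species
    if np.sum(idx) > 0:
        tr /= np.prod(tr.shape)                   # equal weight for every cell
        tr *= np.sum(particle_mass[idx])          # scale by total enclosed mass
    print tr.sum(), np.sum(particle_mass[idx])    # the two sums should agree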

diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -121,41 +121,39 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
+        field_dict = { 'particle_index': grid.particle_id,
+            'particle_type':grid.particle_type,
+            'particle_position_x':grid.particle_position_x,
+            'particle_position_y':grid.particle_position_y,
+            'particle_position_z':grid.particle_position_z,
+            'particle_age':grid.particle_age,
+            'particle_mass':grid.particle_mass,
+            'particle_velocity_x':grid.particle_velocity_x,
+            'particle_velocity_y':grid.particle_velocity_y,
+            'particle_velocity_z':grid.particle_velocity_z,
+            
+            #stellar fields
+            'star_position_x':grid.star_position_x,
+            'star_position_y':grid.star_position_y,
+            'star_position_z':grid.star_position_z,
+            'star_velocity_x':grid.star_velocity_x,
+            'star_velocity_y':grid.star_velocity_y,
+            'star_velocity_z':grid.star_velocity_z,
+            'star_age':grid.star_age,
+            'star_metallicity':grid.star_metallicity1 + grid.star_metallicity2,
+            'star_metallicity1':grid.star_metallicity1,
+            'star_metallicity2':grid.star_metallicity2,
+            'star_mass_initial':grid.star_mass_initial,
+            'star_mass':grid.star_mass}
+        starfield = field.replace('particle','star')
+        psi = grid.pf.particle_star_index
+        if field not in field_dict.keys() and starfield in field_dict.keys():
+            particle_field = np.zeros(grid.particle_mass.shape)                    
+            particle_field[grid.particle_type==psi]=field_dict[starfield]
+            return particle_field
+        else:
+            return field_dict[field]
 
         
     def _read_data_set(self, grid, field):
@@ -198,9 +196,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +230,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +281,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +302,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +316,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +329,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -476,3 +479,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd

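spread_ages linearly redistributes each lump of identical formation ages over a window (10 Myr by default) so that stars formed in the same timestep do not all share exactly the same age. The following is a simplified, dataset-free illustration of that intent rather than a line-for-line copy of the routine; the ages are invented.

    import numpy as np

    yr = 365 * 24 * 3600.0
    spread = 1.0e7 * yr                     # 10 Myr window, the default above

    # Invented ages: stars born in lumps share identical ages, ordered old->young.
    ages = np.repeat([5.0e9, 3.0e9, 1.0e9], [4, 3, 3]) * yr

    agesd = ages.copy()
    for value in np.unique(ages):
        sel, = np.where(ages == value)
        lage = max(value - spread, 0.0)     # never spread to negative ages
        agesd[sel] = np.linspace(lage, value, sel.size)

    print agesd / yr / 1e9                  # each lump now spans about 0.01 Gyr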

diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r b25bf1498a9af53e11e77415fbdb86b3c6102378 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -958,12 +958,87 @@
         return info, values
 
 class StereoPairCamera(Camera):
-    def __init__(self, original_camera, relative_separation = 0.005):
+    def __init__(self,original_camera,
+                 auto_focus=False,
+                 focal_length=None,
+                 frac_near_plane = 0.90, 
+                 frac_far_plane  = 1.10,
+                 frac_eye_separation=0.05,
+                 aperture = 60.0,
+                 relative_separation=0.005):
+        """
+        Auto-focus is adapted from a guide & code at :
+        http://paulbourke.net/miscellaneous/stereographics/stereorender/
+        """
         ParallelAnalysisInterface.__init__(self)
         self.original_camera = original_camera
-        self.relative_separation = relative_separation
+        oc = self.original_camera
+        self.auto_focus = auto_focus
+        self.focal_length = focal_length
+        if self.auto_focus:
+            dist = lambda x,y: na.sqrt(na.sum((x-y)**2.0))
+            if self.focal_length is None:
+                self.focal_length = dist(oc.normal_vector,0.0)
+            self.focal_far  = oc.center + frac_far_plane*oc.normal_vector
+            self.focal_near = oc.center + frac_near_plane*oc.normal_vector
+            self.wh_ratio = oc.resolution[0]/oc.resolution[1]
+            self.eye_sep  = self.focal_length*frac_eye_separation
+            self.aperture = aperture
+            self.frac_eye_separation = frac_eye_separation
+            self.center_eye_pos = oc.center + oc.normal_vector
+        else:
+            #default to old separation
+            self.relative_separation = relative_separation
+    
+    def finalize_image(self,image):
+        if self.auto_focus:
+            #we have extra frustum pixels on the left and right
+            #cameras
+            left_trim,right_trim = self.trim[0],self.trim[1]
+            left = abs(left_trim)
+            right = image.shape[0]-abs(right_trim)
+            image = image[left:right,:]
+            return image
+
+    def auto_split(self):
+        """We must calculate the new camera centers, as well
+        as the extended frustum pixels."""
+        oc = self.original_camera
+        nv = oc.orienter.normal_vector
+        up = oc.north_vector
+        c = oc.center
+        px = oc.resolution[0] #pixel width
+        norm = lambda x: na.sqrt(na.dot(x,x.conj()))
+        between_eyes = na.cross(nv,up)
+        between_eyes /= norm(between_eyes)
+        between_eyes *= self.eye_sep/2.0
+        le_norm = nv-between_eyes 
+        le_c= c-between_eyes 
+        re_norm = nv+between_eyes 
+        re_c = c+between_eyes 
+        angular_aperture = na.tan(self.aperture/360.0*2.0*na.pi/2.0)
+        delta = na.rint(px*self.frac_eye_separation/(2.0*(angular_aperture)))
+        delta = delta.astype('int')
+        eresolution = oc.resolution[0]+delta
+        left_camera = Camera(le_c, le_norm, oc.width,
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
+        left_camera.trim = [-delta,0]
+        right_camera = Camera(re_c, re_norm, oc.width,
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
+        right_camera.trim = [0,-delta]
+        return (left_camera, right_camera)
 
     def split(self):
+        if self.auto_focus:
+            return self.auto_split()
+        else:
+            return self.default_split()
+    
+    def default_split(self):
         oc = self.original_camera
         uv = oc.orienter.unit_vectors
         c = oc.center
@@ -981,6 +1056,10 @@
                              sub_samples=oc.sub_samples, pf=oc.pf)
         return (left_camera, right_camera)
 
+
+
+        
+
 class FisheyeCamera(Camera):
     def __init__(self, center, radius, fov, resolution,
                  transfer_function = None, fields = None,


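For a feel of the numbers involved: with the default 60-degree aperture and a 5% eye separation, the extra frustum width that auto_split requests (and finalize_image later trims off) works out to a few dozen pixels. A quick back-of-the-envelope check, with an arbitrary example resolution:

    import numpy as np

    resolution_x = 1024          # horizontal pixels of the original camera
    aperture = 60.0              # degrees, matching the constructor default
    frac_eye_separation = 0.05   # eye separation as a fraction of focal length

    # Same expression as in auto_split: tangent of the half-aperture.
    angular_aperture = np.tan(np.radians(aperture) / 2.0)
    delta = int(np.rint(resolution_x * frac_eye_separation /
                        (2.0 * angular_aperture)))
    print delta, 'extra pixels per eye, trimmed again in finalize_image'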

https://bitbucket.org/yt_analysis/yt/changeset/45c4b5dd02c2/
changeset:   45c4b5dd02c2
branch:      yt
user:        Christopher Moody
date:        2012-11-26 21:03:45
summary:     updating definitions
affected #:  1 file

diff -r b25bf1498a9af53e11e77415fbdb86b3c6102378 -r 45c4b5dd02c28d56f234a042e03c009620bb3aab yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody at ucsc.ed>
+Author: Christopher E. Moody <cemoody at ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,30 +25,118 @@
 
 """
 
-art_particle_field_names = [
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_age',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type',
-'star_position_x',
-'star_position_y',
-'star_position_z',
-'star_velocity_x',
-'star_velocity_y',
-'star_velocity_z',
-'star_age',
-'star_mass',
-'star_mass_initial',
-'star_creation_time',
-'star_metallicity1',
-'star_metallicity2',
-'star_metallicity',
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
 ]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+filename_pattern = {
+    'amr':'10MpcBox_csf512_%s.d',
+    'particle_header':'PMcrd%s.DAT',
+    'particle_data':'PMcrs0%s.DAT',
+    'particle_stars':'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137


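The header layouts above are plain (format, name) tables. One plausible way such a table gets consumed with the standard struct module is sketched below; the actual _read_struct helper imported in data_structures.py is not shown in this changeset, so this is only a guess at the idea, using a made-up three-entry table and a fake binary blob.

    import struct
    from cStringIO import StringIO

    # A made-up three-entry header in the same (format, name) style as above.
    toy_struct = [('>i', 'pad byte'), ('>f', 'aexpn'), ('>i', 'istep')]

    # Fake binary blob standing in for the start of a header file.
    blob = StringIO(struct.pack('>ifi', 0, 0.5, 40))

    header = {}
    for fmt, name in toy_struct:
        size = struct.calcsize(fmt)
        (header[name],) = struct.unpack(fmt, blob.read(size))
    print header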

https://bitbucket.org/yt_analysis/yt/changeset/47dca7608ac6/
changeset:   47dca7608ac6
branch:      yt
user:        Christopher Moody
date:        2012-11-26 21:04:12
summary:     cleaned up the datastructures
affected #:  1 file

diff -r 45c4b5dd02c28d56f234a042e03c009620bb3aab -r 47dca7608ac6e35030d3a04977aff73fb08db370 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
-Author: Christopher Erick Moody <cemoody at ucsc.edu>
+Author: Christopher Moody <cemoody at ucsc.edu>
 Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
@@ -20,65 +20,233 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
-from yt.data_objects.hierarchy import \
-      AMRHierarchy
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
+from yt.geometry.geometry_handler import \
+    GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
-      StaticOutput
+    StaticOutput
+
+from .definitions import *
+from io import _read_struct
+from io import _read_art_level_info
+from io import _read_record
+from io import _read_frecord
+from io import _skip_record
+from io import _count_art_octs
+from io import b2t
+from io import load_level
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
-from .fields import \
-    ARTFieldInfo, add_art_field, KnownARTFields
-from yt.utilities.definitions import \
-    mpc_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.lib import \
-    get_box_grids_level
-import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import \
+    mass_hydrogen_cgs, sec_per_Gyr
 
-import yt.frontends.ramses._ramses_reader as _ramses_reader #do not fail silently;
+class ARTStaticOutput(StaticOutput):
+    _hierarchy_class = ARTHierarchy
+    _fieldinfo_fallback = ARTFieldInfo
+    _fieldinfo_known = KnownARTFields
+    
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
+        self.storage_filename = storage_filename
 
-from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+        
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based
+        on the parameters from the header.
+        """
+        self.units = {}
+        self.time_units = {}
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0
+        self._parse_parameter_file()
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
+        boxcm_uncal = boxcm_cal / h
+        box_proper = boxcm_uncal/(1+z)
+        aexpn = self["aexpn"]
+        for unit in mpc_conversion:
+            self.units[unit] = mpc_conversion[unit] * box_proper
+            self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
+            self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
+            self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        t0 = r0/v0
+        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+
+        #factors to multiply the native code units to CGS
+        cf = defaultdict(lambda: 1.0)
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0*1e3 #km/s -> cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
+        
+        for ax in 'xyz':
+            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] = 1.0
+        self.conversion_factors['particle_creation_time'] = 31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34
+
+    def _parse_parameter_file(self):
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        header_vals = {}
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if self.file_particle_header is not None:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                ls_nonzero)
+            self.parameters.update(particle_header_vals)
     
-from yt.frontends.art.definitions import art_particle_field_names
+        #setup standard simulation yt expects to see
+        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
+        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
 
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import spread_ages
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        """
+        Defined for the NMSU file naming scheme.
+        This could differ for other formats.
+        """
+        fn = ("%s" % (os.path.basename(args[0])))
+        f = ("%s" % args[0])
+        if fn.endswith(".d") and fn.startswith('10Mpc') and\
+                os.path.exists(f): 
+            return True
+        return False
 
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
@@ -98,34 +266,10 @@
         self.RightEdge = re
         self.ActiveDimensions = gd
         self.NumberOfParticles=nop
-        self.particle_type = np.array([])
-        self.particle_id= np.array([])
-        self.particle_age= np.array([])
-        self.particle_position_x = np.array([])
-        self.particle_position_y = np.array([])
-        self.particle_position_z = np.array([])
-        self.particle_velocity_x = np.array([])
-        self.particle_velocity_y = np.array([])
-        self.particle_velocity_z = np.array([])
-        self.particle_mass= np.array([])
-        self.star_position_x = np.array([])
-        self.star_position_y = np.array([])
-        self.star_position_z = np.array([])
-        self.star_velocity_x = np.array([])
-        self.star_velocity_y = np.array([])
-        self.star_velocity_z = np.array([])
-        self.star_age = np.array([])
-        self.star_metallicity1 = np.array([])
-        self.star_metallicity2 = np.array([])
-        self.star_mass_initial = np.array([])
-        self.star_mass = np.array([])
-
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        for particle_field in particle_fields:
+            setattr(self, particle_field, np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -158,7 +302,6 @@
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
@@ -170,282 +313,165 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
-        if 'particle_position' in dir(self.pf):
+        if not self.pf.skip_particles:
             self._setup_particle_grids()
         self._setup_field_list()
         
     def _setup_particle_grids(self):
-        grid_particle_count = np.zeros(len(self.grids),dtype='int64')
-        npt = self.pf.particle_position.shape[0]
-        if self.pf.do_grid_particles:
-            nps = self.pf.star_position.shape[0]
-            grid_indices = np.zeros(nps,dtype='int64')
-            particle_id= np.arange(nps,dtype='int64')
-            pbar = get_pbar("Gridding Particles",len(self.grids))
-            grid_indices,grid_particle_count,grids_done = \
-                    particle_assignment(self.grids,
-                      self.grids[0], 
-                      self.pf.star_position,
-                      particle_id,
-                      grid_indices,
-                      grid_particle_count, 
-                      self.pf.domain_dimensions,
-                      self.pf.max_level,
-                      logger=pbar)
-            pbar.finish()        
-            pbar = get_pbar("Finalizing grids ",len(self.grids))
-            for gi, (g,npi) in enumerate(zip(self.grids,grid_particle_count)): 
-                star_mask= grid_indices==gi
-                if gi==0:
-                    #attach all the particles to the root grid
-                    g.particle_type = self.pf.particle_type
-                    g.particle_id = np.arange(npt)
-                    g.particle_mass = self.pf.particle_mass
-                    g.particle_mass_initial = self.pf.particle_mass_initial
-                    g.particle_age = self.pf.particle_age
-                    g.particle_metallicity= self.pf.particle_metallicity
-                    g.particle_position_x= self.pf.particle_position[:,0]
-                    g.particle_position_y= self.pf.particle_position[:,1]
-                    g.particle_position_z= self.pf.particle_position[:,2]
-                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
-                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
-                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
-                if star_mask.sum()>0:
-                    star_data = self.pf.star_data[star_mask]         
-                    (g.star_position_x, \
-                        g.star_position_y, \
-                        g.star_position_z, \
-                        g.star_velocity_x,\
-                        g.star_velocity_y,\
-                        g.star_velocity_z,\
-                        g.star_age,\
-                        g.star_metallicity1,\
-                        g.star_metallicity2,\
-                        g.star_mass_initial,\
-                        g.star_mass) = tuple(star_data.T)
-                    g.NumberOfParticles = npi        
-                self.grids[gi] = g
-                pbar.update(gi)
-            pbar.finish()
-        else:        
-            pbar = get_pbar("Finalizing grids ",len(self.grids))
-            for gi, g in enumerate(self.grids): 
-                if gi==0:
-                    #attach all the particles to the root grid
-                    g.particle_type = self.pf.particle_type
-                    g.particle_id = np.arange(npt)
-                    g.particle_mass = self.pf.particle_mass
-                    g.particle_mass_initial = self.pf.particle_mass_initial
-                    g.particle_age = self.pf.particle_age
-                    g.particle_metallicity= self.pf.particle_metallicity
-                    g.particle_position_x= self.pf.particle_position[:,0]
-                    g.particle_position_y= self.pf.particle_position[:,1]
-                    g.particle_position_z= self.pf.particle_position[:,2]
-                    g.particle_velocity_x= self.pf.particle_velocity[:,0]
-                    g.particle_velocity_y= self.pf.particle_velocity[:,1]
-                    g.particle_velocity_z= self.pf.particle_velocity[:,2]
-                    if self.pf.do_stars:
-                        (g.star_position_x, \
-                            g.star_position_y, \
-                            g.star_position_z, \
-                            g.star_velocity_x,\
-                            g.star_velocity_y,\
-                            g.star_velocity_z,\
-                            g.star_age,\
-                            g.star_metallicity1,\
-                            g.star_metallicity2,\
-                            g.star_mass_initial,\
-                            g.star_mass) = tuple(self.pf.star_data.T)
-                    g.NumberOfParticles = npt        
-                else:
-                    g.star_indices = []
-                self.grids[gi] = g
-            pbar.finish()
-            grid_particle_count[0]=npt
-        self.grid_particle_count = grid_particle_count
-
+        raise NotImplementedError
+    
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts,root_level = _read_art_level_info(f, 
-                    self.pf.level_oct_offsets,level,
-                    coarse_grid=self.pf.domain_dimensions[0])
-            if level>1:
-                assert root_level == last_root_level
-            last_root_level = root_level
-                    
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, 
-                    self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches may be mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff += [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.info("Done with level % 2i; max LE %i", level,np.max(left_index))
+            mylog.info("Done with level % 2i; max LE %i", level,
+                       np.max(left_index))
             pbar.finish()
             self.proto_grids.append(psgs)
             #print sum(len(psg.grid_file_locations) for psg in psgs)
             #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
         grids = []
@@ -477,159 +503,55 @@
                     start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = np.diff(lspecies)[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            nparticles = npb-npa
-            npt = nparticles
-            #make sure we aren't going to throw out good particles
-            if not np.all(self.pf.particle_position[npb:]==0.0):
-                print 'WARNING: unused particles discovered from lspecies'
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            self.pf.particle_position   = self.pf.particle_position
             self.pf.particle_position  /= self.pf.domain_dimensions 
-            #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+            self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(nparticles,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(nparticles,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(nparticles,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(nparticles,dtype='float64')-1
-            self.pf.particle_metallicity  = np.zeros(nparticles,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(nparticles,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(nparticles,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(nparticles,dtype='float64')-1
-
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 
-            #conversion to solar mass units
-            
-
-            a,b=0,0
             self.pf.particle_star_index = len(wspecies)-1
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
-                    if m==0.0:
-                        self.pf.particle_star_index = i
+                if i == self.pf.particle_star_index:
+                    sa,sb = a,b
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m*um #mass in solar masses
                 a=b
-            pbar.finish()
-
-            lparticles = [0,]+list(lspecies)
-            for j,npi in enumerate(lparticles):
-                mylog.debug('found %i of particle type %i'%(j,npi))
-            
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            self.pf.do_stars = do_stars 
-            if self.pf.file_star_data and do_stars: 
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
                 nstars_pa = nstars
                 (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
                         ws_old,ws_oldi,tdum,adum \
                      = read_stars(self.pf.file_star_data)
                 self.pf.nstars_rs = nstars_rs     
                 self.pf.nstars_pa = nstars_pa
-                if not nstars_rs==np.sum(self.pf.particle_type==self.pf.particle_star_index):
-                    print 'WARNING!: nstars is inconsistent!'
+                inconsistent = self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
                 if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    birthtimes= \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
                     assert birthtimes.shape == tbirth.shape    
                     birthtimes*= 1.0e9 #from Gyr to yr
                     birthtimes*= 365*24*3600 #to seconds
                     ages = self.pf.current_time-birthtimes
                     spread = self.pf.spread
-                    if spread == False:
-                        pass
-                    elif type(spread)==type(5.5):
+                    if type(spread)==type(5.5):
                         ages = spread_ages(ages,spread=spread)
-                    else:
+                    elif spread:
                         ages = spread_ages(ages)
-                    idx = self.pf.particle_type == self.pf.particle_star_index    
+                    idx = self.pf.particle_type == self.pf.particle_star_index
                     assert np.sum(idx)==nstars_pa
-                    self.pf.star_position = self.pf.particle_position[idx]
-                    self.pf.star_velocity = self.pf.particle_velocity[idx]
-                    self.pf.particle_age[idx] = ages
-                    self.pf.star_age = ages
-                    pbar.finish()
-                    self.pf.particle_metallicity[idx] = metallicity1+metallicity2
-                    self.pf.particle_metallicity1[idx] = metallicity1
-                    self.pf.particle_metallicity2[idx] = metallicity2
-                    self.pf.particle_mass[idx] = mass*um
-                    self.pf.particle_mass_initial[idx] = mass*um
-                    self.pf.star_metallicity1 = metallicity1
-                    self.pf.star_metallicity2 = metallicity2
-                    self.pf.star_mass_initial = imass*um
-                    self.pf.star_mass = mass*um
-                    self.pf.star_data = np.array([
-                        self.pf.star_position[:,0],
-                        self.pf.star_position[:,1],
-                        self.pf.star_position[:,2],
-                        self.pf.star_velocity[:,0],
-                        self.pf.star_velocity[:,1],
-                        self.pf.star_velocity[:,2],
-                        self.pf.star_age,
-                        self.pf.star_metallicity1,
-                        self.pf.star_metallicity2,
-                        self.pf.star_mass_initial,
-                        self.pf.star_mass]).T
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((na.arange(pos.shape[0]),pos.T)).T 
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb] = metallicity1+metallicity2
         for gi,g in enumerate(grids):    
             self.grids[gi]=g
                     
@@ -698,437 +620,3 @@
             self.pf.level_info,
             self.pf.level_offsets)
 
-class ARTStaticOutput(StaticOutput):
-    _hierarchy_class = ARTHierarchy
-    _fieldinfo_fallback = ARTFieldInfo
-    _fieldinfo_known = KnownARTFields
-    _handle = None
-    
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 do_grid_particles=False,
-                 merge_dm_and_stars=False,
-                 spread = True,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.do_grid_particles = do_grid_particles
-        self.single_particle_mass = single_particle_mass
-        self.merge_dm_and_stars = merge_dm_and_stars
-        self.spread = spread
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
-        self.storage_filename = storage_filename
-        
-        
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]
-        
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        self.units = {}
-        self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
-        boxcm_uncal = boxcm_cal / h
-        box_proper = boxcm_uncal/(1+z)
-        aexpn = self["aexpn"]
-        for unit in mpc_conversion:
-            self.units[unit] = mpc_conversion[unit] * box_proper
-            self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
-            self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
-            self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
-        
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
-        for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
-        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
-        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
-        self.time_units['years'] = 1.0/(365*3600*24.0)
-        self.time_units['days']  = 1.0 / (3600*24.0)
-
-
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
-    def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
-        header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
-        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctChfull = _read_frecord(f,'>i')
-        self.root_iOctCh = self.root_iOctChfull[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspeciesf'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspeciesf'] = np.fromfile(fh,dtype='>i',count=10)
-        assert np.all(self.parameters['lspeciesf'][n:]==0.0)
-        assert np.all(self.parameters['wspeciesf'][n:]==0.0)
-        self.parameters['wspecies'] = self.parameters['wspeciesf'][:n]
-        self.parameters['lspecies'] = self.parameters['lspeciesf'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if "10MpcBox" in args[0]:
-            return True
-        return False
-
-def particle_assignment(grids,this_grid, 
-                                  pos,
-                                  particle_id,
-                                  grid_indices,
-                                  grid_particle_count, 
-                                  domain_dimensions,
-                                  max_level,
-                                  subdiv=2,
-                                  grids_done=0,
-                                  logger=None):
-    #for every particle check every child grid to see if it fits inside
-    #cast the pos -> cell location index (instead of doing a LE<pos<RE check)
-    #find if cell descends into the next mesh
-    
-    #cast every position into a cell on this grid
-    #we may get negative indices or indices outside this grid
-    #mask them out
-    exp = domain_dimensions*subdiv**this_grid.Level
-    lei= np.floor((pos-this_grid.LeftEdge)*exp).astype('int64')
-
-    #now lookup these indices in the child index mask
-    #throw out child grids = -1 and particles outside the range
-    #default state is to not grid a particle
-    child_idx = np.zeros(lei.shape[0],dtype='int64')-1
-    #remove particles to the left or right of the grid
-    lei_out  = np.any(lei>=this_grid.ActiveDimensions,axis=1)
-    lei_out |= np.any(lei<0,axis=1)
-    #lookup grids for every particle except the ones to the 
-    leio=lei[~lei_out]
-    #child_idx[~lei_out]= \
-    child_idx[~lei_out]= \
-            this_grid.child_index_mask[(leio[:,0],leio[:,1],leio[:,2])]
-    mask = (child_idx > -1)
-    #only assign the particles if they point to a grid ID that isnt -1
-    grid_indices[particle_id[mask]] = child_idx[mask]
-    #the number of particles on this grid is equal to those
-    #that point to -1
-    grid_particle_count[this_grid.id] = np.sum(~mask)
-    grids_done +=1
-    if logger:
-        logger.update(grids_done)
-
-    for child_grid_index in np.unique(this_grid.child_index_mask):
-        if child_grid_index == -1: 
-            continue
-        if grids[child_grid_index].Level == max_level:
-            continue
-        mask = child_idx == child_grid_index
-        if np.sum(mask)==0:continue
-        grid_indices,grid_particle_count,grids_done = \
-        particle_assignment(grids,grids[child_grid_index],
-                pos[mask],particle_id[mask],
-                grid_indices,grid_particle_count,
-                domain_dimensions,max_level,grids_done=grids_done,
-                subdiv=subdiv,logger=logger)
-    return grid_indices,grid_particle_count,grids_done
-


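A note on the record helpers used in the header parsing above: _read_record,
_skip_record and _read_frecord come from yt/frontends/art/io.py and walk the
big-endian Fortran unformatted records of the ART files. Their bodies are not
shown in this changeset; the following is only a minimal sketch of the usual
record layout (a 4-byte length marker, the payload, and a matching trailing
marker), which is what makes the offsets recorded with f.tell() above
meaningful.

    import struct
    import numpy as np

    def _read_record(f):
        # One Fortran unformatted record: leading big-endian 4-byte length,
        # the payload, then a trailing length that must match the leading one.
        (nbytes,) = struct.unpack('>i', f.read(4))
        payload = f.read(nbytes)
        (check,) = struct.unpack('>i', f.read(4))
        assert nbytes == check
        return payload

    def _skip_record(f):
        # Seek past the payload and the trailing marker without reading them.
        (nbytes,) = struct.unpack('>i', f.read(4))
        f.seek(nbytes + 4, 1)

    def _read_frecord(f, dtype):
        # Read a record and reinterpret its payload as a numpy array,
        # e.g. _read_frecord(f, '>i') for the root-level iOctCh mask.
        return np.frombuffer(_read_record(f), dtype=dtype)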

https://bitbucket.org/yt_analysis/yt/changeset/f045eea08410/
changeset:   f045eea08410
branch:      yt
user:        Christopher Moody
date:        2012-11-26 22:07:52
summary:     fluids work; particles are not yet gridded
affected #:  4 files

diff -r 47dca7608ac6e35030d3a04977aff73fb08db370 -r f045eea0841056fcdbbec1f61e5d78499fc947ad setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,6 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
\ No newline at end of file
+with-xunit=1
+#with-answer-testing=1
+#answer-compare=gold001

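For reference, nose reads the [nosetests] section of setup.cfg and treats each
key as the long form of a command-line option, so the existing with-xunit=1 is
the same as passing --with-xunit. Uncommenting the two new keys would therefore
be roughly equivalent to running

    nosetests --with-answer-testing --answer-compare=gold001

assuming the yt answer-testing nose plugin is installed and registers options
under those names; leaving them commented out keeps answer testing opt-in.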

diff -r 47dca7608ac6e35030d3a04977aff73fb08db370 -r f045eea0841056fcdbbec1f61e5d78499fc947ad yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -34,22 +34,39 @@
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
-from yt.geometry.oct_geometry_handler import \
-    OctreeGeometryHandler
-from yt.geometry.geometry_handler import \
-    GeometryHandler, YTDataChunk
+from yt.data_objects.hierarchy import \
+      AMRHierarchy
 from yt.data_objects.static_output import \
-    StaticOutput
+      StaticOutput
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    ARTFieldInfo, add_art_field, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
+import yt.utilities.lib as amr_utils
 
 from .definitions import *
-from io import _read_struct
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
 from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
 from io import _read_record
 from io import _read_frecord
-from io import _skip_record
-from io import _count_art_octs
+from io import _read_record_size
+from io import _read_struct
 from io import b2t
-from io import load_level
+
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
 
 from .fields import ARTFieldInfo, KnownARTFields
 from yt.utilities.definitions import \
@@ -63,191 +80,6 @@
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-class ARTStaticOutput(StaticOutput):
-    _hierarchy_class = ARTHierarchy
-    _fieldinfo_fallback = ARTFieldInfo
-    _fieldinfo_known = KnownARTFields
-    
-    def __init__(self, file_amr, storage_filename = None,
-            skip_particles=False,skip_stars=False,data_style='art'):
-        self.data_style = data_style
-        self._find_files(file_amr)
-        self.skip_particles = skip_particles
-        self.skip_stars = skip_stars
-        self.file_amr = file_amr
-        self.parameter_filename = file_amr
-        self.domain_left_edge  = np.zeros(3,dtype='float64')
-        self.domain_right_edge = np.ones(3,dtype='float64') 
-        StaticOutput.__init__(self, file_amr, data_style)
-        self.storage_filename = storage_filename
-
-    def _find_files(self,file_amr):
-        """
-        Given the AMR base filename, attempt to find the
-        particle header, star files, etc.
-        """
-        prefix,suffix = filename_pattern['amr'].split('%s')
-        affix = os.path.basename(file_amr).replace(prefix,'')
-        affix = affix.replace(suffix,'')
-        affix = affix.replace('_','')
-        affix = affix[1:-1]
-        dirname = os.path.dirname(file_amr)
-        for filetype, pattern in filename_pattern.items():
-            #sometimes the affix is surrounded by an extraneous _
-            #so check for an extra character on either side
-            check_filename = dirname+'/'+pattern%('?%s?'%affix)
-            filenames = glob.glob(check_filename)
-            if len(filenames)==1:
-                setattr(self,"file_"+filetype,filenames[0])
-                mylog.info('discovered %s',filetype)
-            elif len(filenames)>1:
-                setattr(self,"file_"+filetype,None)
-                mylog.info("Ambiguous number of files found for %s",
-                        check_filename)
-            else:
-                setattr(self,"file_"+filetype,None)
-
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]
-        
-    def _set_units(self):
-        """
-        Generates the conversion to various physical units based 
-		on the parameters from the header
-        """
-        self.units = {}
-        self.time_units = {}
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0
-        self._parse_parameter_file()
-
-        #spatial units
-        z   = self.current_redshift
-        h   = self.hubble_constant
-        boxcm_cal = self.parameters["boxh"]
-        boxcm_uncal = boxcm_cal / h
-        box_proper = boxcm_uncal/(1+z)
-        aexpn = self["aexpn"]
-        for unit in mpc_conversion:
-            self.units[unit] = mpc_conversion[unit] * box_proper
-            self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
-            self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
-            self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-
-        #all other units
-        wmu = self.parameters["wmu"]
-        Om0 = self.parameters['Om0']
-        ng  = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh   = self.parameters['boxh'] 
-        aexpn  = self.parameters["aexpn"]
-        hubble = self.parameters['hubble']
-
-        r0 = boxh/ng
-        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        S_0 = 52.077 * wmu**(5.0/3.0)
-        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        t0 = r0/v0
-        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
-
-        #factors to multiply the native code units to CGS
-        cf = defaultdict(lambda: 1.0)
-        cf['Pressure'] = P0 #already cgs
-        cf['Velocity'] = v0*1e3 #km/s -> cm/s
-        cf["Mass"] = aM0 * 1.98892e33
-        cf["Density"] = rho0*(aexpn**-3.0)
-        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
-        cf["Potential"] = 1.0
-        cf["Entropy"] = S_0
-        cf["Temperature"] = tr
-        self.cosmological_simulation = True
-        self.conversion_factors = cf
-        
-        for ax in 'xyz':
-            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-        for particle_field in particle_fields:
-            self.pf.conversion_factors[particle_field] =  1.0
-        self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-        self.pf.conversion_factors['Msun'] = 5.027e-34 
-
-    def _parse_parameter_file(self):
-        """
-        Get the various simulation parameters & constants.
-        """
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.cosmological_simulation = True
-        self.parameters = {}
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        header_vals = {}
-        self.parameters.update(constants)
-        with open(self.file_amr,'rb') as f:
-            amr_header_vals = _read_struct(f,amr_header_struct)
-            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-                _skip_record(f)
-            (self.ncell,) = struct.unpack('>l', _read_record(f))
-            # Try to figure out the root grid dimensions
-            est = int(np.rint(self.ncell**(1.0/3.0)))
-            # Note here: this is the number of *cells* on the root grid.
-            # This is not the same as the number of Octs.
-            self.domain_dimensions = np.ones(3, dtype='int64')*est 
-            self.root_grid_mask_offset = f.tell()
-            root_cells = self.domain_dimensions.prod()
-            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
-                 order='F')
-            self.root_grid_offset = f.tell()
-            _skip_record(f) # hvar
-            _skip_record(f) # var
-            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-            self.child_grid_offset = f.tell()
-        self.parameters.update(amr_header_vals)
-        if self.file_particle_header is None:
-            with open(self.file_particle_header,"rb") as fh:
-                particle_header_vals = _read_struct(fh,particle_header_struct)
-                fh.seek(seek_extras)
-                n = particle_header_vals['Nspecies']
-                wspecies = np.fromfile(fh,dtype='>f',count=10)
-                lspecies = np.fromfile(fh,dtype='>i',count=10)
-            self.parameters['wspecies'] = wspecies[:n]
-            self.parameters['lspecies'] = lspecies[:n]
-            ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-            mylog.info("Discovered %i species of particles",len(ls_nonzero))
-            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                ls_nonzero)
-            self.parameters.update(particle_header_vals)
-    
-        #setup standard simulation yt expects to see
-        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.omega_lambda = amr_header_vals['Oml0']
-        self.omega_matter = amr_header_vals['Om0']
-        self.hubble_constant = amr_header_vals['hubble']
-        self.min_level = amr_header_vals['min_level']
-        self.max_level = amr_header_vals['max_level']
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        """
-        Defined for the NMSU file naming scheme.
-        This could differ for other formats.
-        """
-        fn = ("%s" % (os.path.basename(args[0])))
-        f = ("%s" % args[0])
-        if fn.endswith(".d") and fn.startswith('10Mpc') and\
-                os.path.exists(f): 
-                return True
-        return False
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
@@ -267,7 +99,7 @@
         self.ActiveDimensions = gd
         self.NumberOfParticles=nop
         for particle_field in particle_fields:
-            setattr(self,particle_field) = np.array([])
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
         id = self.id - self._id_offset
@@ -295,7 +127,8 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
@@ -319,7 +152,7 @@
         self._setup_field_list()
         
     def _setup_particle_grids(self):
-        raise NotImplementedError
+        pass
     
     def _initialize_data_storage(self):
         pass
@@ -371,7 +204,8 @@
                     continue
                 psgs = []
                 effs,sizes = [], []
-                if level > self.pf.limit_level : continue
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
                 #refers to the left index for the art octgrid
                 left_index, fl, nocts,root_level = _read_art_level_info(f, 
                         self.pf.level_oct_offsets,level,
@@ -464,13 +298,11 @@
                 mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
                             eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-            mylog.info("Done with level % 2i; max LE %i", level,
-                       np.max(left_index))
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
         
     def _parse_hierarchy(self):
@@ -506,30 +338,35 @@
         if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
+            um  = self.pf.conversion_factors['Mass'] #mass units in g
+            uv  = self.pf.conversion_factors['Velocity'] #velocity units, cm/s
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,
                         self.pf.parameters['Nrow'])
-            self.pf.particle_position   = self.pf.particle_position
+            nparticles = self.pf.particle_position.shape[0]
             self.pf.particle_position  /= self.pf.domain_dimensions 
             self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
             self.pf.particle_star_index = len(wspecies)-1
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float32')
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if i == self.particle_star_index:
+                if i == self.pf.particle_star_index:
                     sa,sb = a,b
                 self.pf.particle_type[a:b] = i #particle type
-                self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                self.pf.particle_mass[a:b] = m*um #mass in grams
                 a=b
             if not self.pf.skip_stars and self.pf.file_particle_stars: 
-                nstars_pa = nstars
                 (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
                         ws_old,ws_oldi,tdum,adum \
-                     = read_stars(self.pf.file_star_data)
+                     = read_stars(self.pf.file_particle_stars)
                 self.pf.nstars_rs = nstars_rs     
-                self.pf.nstars_pa = nstars_pa
-                inconsisten = self.pf.particle_type==self.pf.particle_star_index
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
                 if not nstars_rs==np.sum(inconsistent):
                     mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
                 if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
                     birthtimes= b2t(tbirth,n=n)
@@ -538,20 +375,23 @@
                     birthtimes*= 1.0e9 #from Gyr to yr
                     birthtimes*= 365*24*3600 #to seconds
                     ages = self.pf.current_time-birthtimes
-                    spread = self.pf.spread
+                    spread = self.pf.spread_age
                     if type(spread)==type(5.5):
                         ages = spread_ages(ages,spread=spread)
                     elif spread:
                         ages = spread_ages(ages)
                     idx = self.pf.particle_type == self.pf.particle_star_index
-                    assert np.sum(idx)==nstars_pa
-                    pf.particle_age[sa:sb] = ages
-                    pf.particle_mass[sa:sb] = mass
-                    pf.particle_mass_initial[sa:sb] = imass
-                    pf.particle_creation_time[sa:sb] = birthtimes
-                    pf.particle_metallicity1[sa:sb] = metallicity1
-                    pf.particle_metallicity2[sa:sb] = metallicity2
-                    pf.particle_metallicity[sa:sb] = metallicity1+metallicity2
+                    for psf in particle_star_fields:
+                        setattr(self.pf,psf,
+                                np.zeros(nparticles,dtype='float32'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
         for gi,g in enumerate(grids):    
             self.grids[gi]=g
                     
@@ -594,10 +434,10 @@
         self.max_level = self.grid_levels.max()
 
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
@@ -620,3 +460,190 @@
             self.pf.level_info,
             self.pf.level_offsets)
 
+class ARTStaticOutput(StaticOutput):
+    _hierarchy_class = ARTHierarchy
+    _fieldinfo_fallback = ARTFieldInfo
+    _fieldinfo_known = KnownARTFields
+    
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
+        self.storage_filename = storage_filename
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+        
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based
+        on the parameters from the header
+        """
+        self.units = {}
+        self.time_units = {}
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
+        boxcm_uncal = boxcm_cal / h
+        box_proper = boxcm_uncal/(1+z)
+        aexpn = self["aexpn"]
+        for unit in mpc_conversion:
+            self.units[unit] = mpc_conversion[unit] * box_proper
+            self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
+            self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
+            self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        t0 = r0/v0
+        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+
+        #factors to multiply the native code units to CGS
+        cf = defaultdict(lambda: 1.0)
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
+        
+        for ax in 'xyz':
+            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
+
+    def _parse_parameter_file(self):
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        header_vals = {}
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *np.diff(ls_nonzero))
+            self.parameters.update(particle_header_vals)
+    
+        #setup standard simulation yt expects to see
+        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
+        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        """
+        Defined for the NMSU file naming scheme.
+        This could differ for other formats.
+        """
+        fn = ("%s" % (os.path.basename(args[0])))
+        f = ("%s" % args[0])
+        if fn.endswith(".d") and fn.startswith('10Mpc') and\
+                os.path.exists(f): 
+                return True
+        return False
+


diff -r 47dca7608ac6e35030d3a04977aff73fb08db370 -r f045eea0841056fcdbbec1f61e5d78499fc947ad yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -58,6 +58,16 @@
     'particle_type'
 ]
 
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
 filename_pattern = {				
 	'amr':'10MpcBox_csf512_%s.d',
 	'particle_header':'PMcrd%s.DAT',


diff -r 47dca7608ac6e35030d3a04977aff73fb08db370 -r f045eea0841056fcdbbec1f61e5d78499fc947ad yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -149,7 +149,7 @@
         starfield = field.replace('particle','star')
         psi = grid.pf.particle_star_index
         if field not in field_dict.keys() and starfield in field_dict.keys():
-            particle_field = np.zeros(grid.particle_mass.shape)                    
+            particle_field = np.zeros(grid.particle_mass.shape) 
             particle_field[grid.particle_type==psi]=field_dict[starfield]
             return particle_field
         else:
@@ -157,7 +157,7 @@
 
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -349,7 +349,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk


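For context, the relocated ARTStaticOutput above now accepts skip_particles,
skip_stars, limit_level and spread_age keywords. A minimal usage sketch follows;
the filename and keyword values are placeholders, and it assumes yt's load()
passes extra keywords through to the frontend constructor:

    from yt.mods import load

    # Load an NMSU-ART AMR dump (the '10MpcBox_csf512_%s.d' naming scheme).
    pf = load("10MpcBox_csf512_a0.500.d",
              skip_particles=False,  # also read the particle header/data files
              skip_stars=False,      # also read the stars file, if discovered
              limit_level=None,      # None reads every oct level
              spread_age=True)       # smooth star birth times via spread_ages
    print pf.current_redshift, pf.domain_dimensions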

https://bitbucket.org/yt_analysis/yt/changeset/1413a16ee876/
changeset:   1413a16ee876
branch:      yt
user:        Christopher Moody
date:        2012-11-26 23:19:05
summary:     assigned all particles to the root grid
affected #:  2 files

diff -r f045eea0841056fcdbbec1f61e5d78499fc947ad -r 1413a16ee876354a8ff666e0744b439f5e880b97 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -343,7 +343,11 @@
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,
                         self.pf.parameters['Nrow'])
-            nparticles = self.pf.particle_position.shape[0]
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
             self.pf.particle_position  /= self.pf.domain_dimensions 
             self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
@@ -430,6 +434,22 @@
                 g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
         pb.finish()
         self.max_level = self.grid_levels.max()
 
@@ -618,10 +638,10 @@
                 lspecies = np.fromfile(fh,dtype='>i',count=10)
             self.parameters['wspecies'] = wspecies[:n]
             self.parameters['lspecies'] = lspecies[:n]
-            ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+            ls_nonzero = np.diff(lspecies)[:n-1]
             mylog.info("Discovered %i species of particles",len(ls_nonzero))
             mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                *np.diff(ls_nonzero))
+                *ls_nonzero)
             self.parameters.update(particle_header_vals)
     
         #setup standard simulation yt expects to see


diff -r f045eea0841056fcdbbec1f61e5d78499fc947ad -r 1413a16ee876354a8ff666e0744b439f5e880b97 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -121,40 +121,16 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        field_dict = { 'particle_index': grid.particle_id,
-            'particle_type':grid.particle_type,
-            'particle_position_x':grid.particle_position_x,
-            'particle_position_y':grid.particle_position_y,
-            'particle_position_z':grid.particle_position_z,
-            'particle_age':grid.particle_age,
-            'particle_mass':grid.particle_mass,
-            'particle_velocity_x':grid.particle_velocity_x,
-            'particle_velocity_y':grid.particle_velocity_y,
-            'particle_velocity_z':grid.particle_velocity_z,
-            
-            #stellar fields
-            'star_position_x':grid.star_position_x,
-            'star_position_y':grid.star_position_y,
-            'star_position_z':grid.star_position_z,
-            'star_mass':grid.star_mass,
-            'star_velocity_x':grid.star_velocity_x,
-            'star_velocity_y':grid.star_velocity_y,
-            'star_velocity_z':grid.star_velocity_z,
-            'star_age':grid.star_age,
-            'star_metallicity':grid.star_metallicity1 + grid.star_metallicity2,
-            'star_metallicity1':grid.star_metallicity1,
-            'star_metallicity2':grid.star_metallicity2,
-            'star_mass_initial':grid.star_mass_initial,
-            'star_mass':grid.star_mass}
-        starfield = field.replace('particle','star')
-        psi = grid.pf.particle_star_index
-        if field not in field_dict.keys() and starfield in field_dict.keys():
-            particle_field = np.zeros(grid.particle_mass.shape) 
-            particle_field[grid.particle_type==psi]=field_dict[starfield]
-            return particle_field
-        else:
-            return field_dict[field]
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
         if field in particle_fields:


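With the two changes above, every particle is attached to the root grid and the
IO handler resolves particle fields by attribute lookup, falling back to the
star-particle subset via the particle_type mask. A hypothetical access pattern,
assuming the usual yt-2.x grid indexing and placeholder field names:

    from yt.mods import load

    pf = load("10MpcBox_csf512_a0.500.d")
    root = pf.h.grids[0]                # all particles now live on this grid
    print root.NumberOfParticles
    mass = root["particle_mass"]        # found directly via getattr() on the grid
    xpos = root["particle_position_x"]  # split out of pf.particle_position per axis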

https://bitbucket.org/yt_analysis/yt/changeset/c94dedb96da3/
changeset:   c94dedb96da3
branch:      yt
user:        juxtaposicion
date:        2012-11-26 23:27:46
summary:     merge
affected #:  10 files

diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,6 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
\ No newline at end of file
+with-xunit=1
+#with-answer-testing=1
+#answer-compare=gold001


diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,141 +26,23 @@
 from yt.mods import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
+
 from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-from yt.config import ytcfg
-
 import rockstar_interface
-
 import socket
 import time
-import threading
-import signal
-import os
-from os import environ
-from os import mkdir
-from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
 
-class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
-        # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
-    def run(self, handler, pool):
-        # If inline, we use forks.
-        server_pid = 0
-        # Start a server on only one machine/fork.
-        if pool.comm.rank == 0:
-            server_pid = os.fork()
-            if server_pid == 0:
-                handler.start_server()
-                os._exit(0)
-        # Start writers.
-        writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        # Make sure the forks are done, which they should be.
-        if writer_pid != 0:
-            os.waitpid(writer_pid, 0)
-        if server_pid != 0:
-            os.waitpid(server_pid, 0)
-
-class StandardRunner(ParallelAnalysisInterface):
-    def __init__(self, num_readers, num_writers):
-        self.num_readers = num_readers
-        if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
-            mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
-            raise RuntimeError
-    
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
-    
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
@@ -183,30 +65,23 @@
             The number of reader can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
+            IO-limited. Default is 1.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
+            The default is set to comm.size-num_readers-1.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
+            placed. Default is str(pf)+'_rockstar'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-            
+        force_res: None
+            The default force resolution is 0.0012 comoving Mpc/h.
+            This overrides Rockstar's defaults.
+
         Returns
         -------
         None
@@ -218,6 +93,7 @@
 
         test_rockstar.py:
 
+        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
@@ -229,64 +105,50 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        # Decide how we're working.
-        if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
-        else:
-            self.runner = StandardRunner(num_readers, num_writers)
-        self.num_readers = self.runner.num_readers
-        self.num_writers = self.runner.num_writers
-        mylog.info("Rockstar is using %d readers and %d writers",
-            self.num_readers, self.num_writers)
-        # Note that Rockstar does not support subvolumes.
-        # We assume that all of the snapshots in the time series
-        # use the same domain info as the first snapshots.
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshots
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field, data):
-            try:
-                return (data["particle_type"]==dm_type).sum()
-            except KeyError:
-                return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            #we need readers+writers+1 server = comm size        
+            raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
+        data_source = tpf.h.all_data()
+        self.comm.barrier()
+        self.force_res = force_res
+        def _pcount(field,data):
+            return (data["particle_type"]==dm_type).sum()
+        add_field("pcount",function=_pcount,particle_type=True)
+        total_particles = dd.quantities['TotalQuantity']('pcount')
+        self.total_particles = total_particles
+        mylog.info("Found %i halo particles",total_particles)
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+                self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.size == 1 or self.workgroup.name == "server":
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +156,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -302,13 +164,21 @@
         """
         
         """
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            mylog.debug("Num Writers = %s Num Readers = %s",
+                        self.num_writers, self.num_readers)
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(self.num_readers, name = "readers")
+            self.pool.add_workgroup(self.num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, 
-                    self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    len(self.ts), self.total_particles, self.dm_type,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
@@ -317,29 +187,27 @@
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-            # Make a record of which dataset corresponds to which set of
-            # output files because it will be easy to lose this connection.
-            fp = open(self.outbase + '/pfs.txt', 'w')
-            fp.write("# pfname\tindex\n")
-            for i, pf in enumerate(self.ts):
-                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
-                line = "%s\t%d\n" % (pfloc, i)
-                fp.write(line)
-            fp.close()
-        # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
-            # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            self.pool.free_all()
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
@@ -347,4 +215,5 @@
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+        tpf = self.ts[0]
+        return RockstarHaloList(tpf,file_name)


diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,8 +29,6 @@
 cimport cython
 from libc.stdlib cimport malloc
 
-from yt.config import ytcfg
-
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -46,11 +44,11 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h" nogil:
+cdef import from "server.h":
     int server()
 
-cdef import from "client.h" nogil:
-    void client(np.int64_t in_type)
+cdef import from "client.h":
+    void client()
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
@@ -239,54 +237,26 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
-    global SCALE_NOW
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    global SCALE_NOW, TOTAL_PARTICLES
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    
+
+    # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-
-    all_grids = pf.h.grids
+    dd = pf.h.all_data()
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
-    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
-
-    # First we need to find out how many this reader is going to read in
-    # if the number of readers > 1.
-    if NUM_BLOCKS > 1:
-        local_parts = 0
-        for g in grids:
-            if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-                iddm = g["particle_type"] == rh.dm_type
-            else:
-                iddm = Ellipsis
-            arri = g["particle_index"].astype("int64")
-            arri = arri[iddm] #pick only DM
-            local_parts += arri.size
-    else:
-        local_parts = TOTAL_PARTICLES
-
-    #print "local_parts", local_parts
-
-    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
-
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
@@ -295,12 +265,8 @@
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
-        arri = g["particle_index"].astype("int64")
+        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
@@ -310,13 +276,22 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = g[field].astype("float64")
+            arr = dd._get_data_from_grid(g, field).astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = local_parts
+    num_p[0] = tnpart
+    TOTAL_PARTICLES = tnpart
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
 
 cdef class RockstarInterface:
 
@@ -348,10 +323,10 @@
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        global OVERLAP_LENGTH, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            #print "set force res to ",FORCE_RES
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
@@ -392,7 +367,6 @@
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
-        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
@@ -402,9 +376,7 @@
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        with nogil:
-            server()
+        server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
-        client(in_type)
+    def start_client(self):
+        client()

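The rh_read_particles rewrite above splits the grid list evenly across reader
blocks and counts dark-matter particles with a particle_type mask before
allocating the particle buffer. A plain-Python sketch of that bookkeeping, with
num_blocks/block/dm_type standing in for the module-level globals:

    import numpy as np

    def count_dm_for_block(dd, num_blocks, block, dm_type):
        # this reader only touches its own slice of the grid list
        my_grids = np.array_split(dd._grids, num_blocks)[block]
        total = 0
        for g in my_grids:
            # boolean mask selecting dark-matter particles in this grid
            dm = dd._get_data_from_grid(g, "particle_type") == dm_type
            total += int(np.sum(dm))
        return my_grids, total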

diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #child grid index, or -1 for a leaf
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #only level>0 cells can be leaves; superlevels always recurse
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
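
For reference, the recursion above writes one entry into refined per visited cell, depth first: True means the cell is an oct whose eight children follow, False means a leaf whose field values are appended to output; create_fits_file then stores that mask as the boolean "structure" column. A standalone sketch of walking such a mask (assuming that encoding, and ignoring the superlevel special case where level<=0 cells always recurse):

    # count octs and leaves in a depth-first refinement mask
    def walk(mask, i=0):
        """Return (next_index, n_octs, n_leaves) for the subtree at mask[i]."""
        if not mask[i]:
            return i+1, 0, 1          # leaf: one entry, no children
        i += 1
        octs, leaves = 1, 0
        for _ in range(8):            # eight children follow, depth first
            i, o, l = walk(mask, i)
            octs += o
            leaves += l
        return i, octs, leaves

    mask = [True] + [False]*8         # one root oct, all eight children are leaves
    print walk(mask)                  # -> (9, 1, 8)
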
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,np.sum(idx)
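
The star selection in prepare_star_particles combines a particle-type mask with a bounding-box test (all three coordinates strictly inside fle/fre) and now asserts that the result is non-empty. A minimal standalone sketch with made-up positions:

    import numpy as np
    pos = np.array([[0.2, 0.2, 0.2],
                    [0.9, 0.5, 0.5],
                    [0.4, 0.4, 0.4]])
    ptype = np.array([2, 2, 0])              # illustrative particle types
    star_type = 2
    fle = np.array([0.1, 0.1, 0.1])          # left edge of the export box
    fre = np.array([0.8, 0.8, 0.8])          # right edge
    idxst = ptype == star_type
    idx = idxst & np.all(pos>fle, axis=1) & np.all(pos<fre, axis=1)
    assert np.sum(idx) > 0                   # same sanity check as above
    print pos[idx]                           # only the first particle survives
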
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
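
The removed generate_sunrise_camera_position picked the scene field of view from a half-mass radius: sort the particle radii, find where the cumulative mass first exceeds half the total, take five times that radius, and clamp the result to 3-20 kpc. A standalone sketch of that estimate, working directly in kpc with made-up equal-mass particles:

    import numpy as np
    radii = np.array([0.5, 3.0, 1.0, 8.0, 2.0])  # kpc, illustrative
    mass  = np.ones_like(radii)                  # equal-mass particles
    si = np.argsort(radii)
    radii, mass = radii[si], mass[si]
    idx, = np.where(np.cumsum(mass) > mass.sum()/2.0)
    re = radii[idx[0]]                           # half-mass radius
    scene_fov = min(max(5*re, 3.0), 20.0)        # clamp to [3, 20] kpc
    print re, scene_fov                          # -> 2.0 10.0
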




diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -18,17 +20,16 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -42,64 +43,65 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +111,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -124,381 +127,278 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
+        if not self.pf.skip_particles:
+            self._setup_particle_grids()
         self._setup_field_list()
         
+    def _setup_particle_grids(self):
+        pass
+    
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches maybe mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
+            um  = self.pf.conversion_factors['Mass'] #mass units in g
+            uv  = self.pf.conversion_factors['Velocity'] #velocity units in cm/s
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float32')
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                if i == self.pf.particle_star_index:
+                    sa,sb = a,b
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m*um #mass in grams
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        setattr(self.pf,psf,
+                                np.zeros(nparticles,dtype='float32'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
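
In the particle handling in _parse_hierarchy above, lspecies holds the cumulative particle count at the end of each species and wspecies the per-species mass in code units, so particle types and masses are filled in by slicing between consecutive boundaries (the star species is the last one). A standalone sketch with made-up values:

    import numpy as np
    lspecies = [4, 6, 9]          # cumulative counts: 4 + 2 + 3 particles
    wspecies = [8.0, 1.0, 0.1]    # per-species mass in code units, stars last
    um = 2.0                      # illustrative mass unit conversion
    nparticles = lspecies[-1]
    particle_type = np.zeros(nparticles, dtype='int')
    particle_mass = np.zeros(nparticles, dtype='float32')
    a = 0
    for i, (b, m) in enumerate(zip(lspecies, wspecies)):
        particle_type[a:b] = i
        particle_mass[a:b] = m*um
        a = b
    print particle_type           # -> [0 0 0 0 1 1 2 2 2]
    print particle_mass           # -> [16. 16. 16. 16. 2. 2. 0.2 0.2 0.2]
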
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,53 +407,57 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
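
The external_wrapper(f) closure in _setup_field_list is the usual idiom for making each particle field's conversion function capture its own field name; without the extra function layer, Python's late binding would make every converter see the last value of the loop variable. A minimal illustration:

    def make_callbacks_broken(names):
        return [lambda: name for name in names]       # all share one 'name'

    def make_callbacks_wrapped(names):
        def wrapper(n):
            return lambda: n                          # n is bound per call
        return [wrapper(name) for name in names]

    names = ['particle_mass', 'particle_age']
    print [cb() for cb in make_callbacks_broken(names)]   # ['particle_age', 'particle_age']
    print [cb() for cb in make_callbacks_wrapped(names)]  # ['particle_mass', 'particle_age']
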
@@ -580,97 +484,67 @@
     _hierarchy_class = ARTHierarchy
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
-    _handle = None
     
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 use_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
         self.storage_filename = storage_filename
-        
-        
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical units based
+        on the parameters from the header.
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
         aexpn = self["aexpn"]
@@ -679,269 +553,111 @@
             self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        r0 = boxh/ng
+        P0 = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        t0 = r0/v0
+        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+
+        #factors to multiply the native code units to CGS
+        cf = defaultdict(lambda: 1.0)
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
         
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
         for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
+            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
 
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
     def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
         header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = np.diff(lspecies)[:n-1]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *ls_nonzero)
+            self.parameters.update(particle_header_vals)
+    
+        #set up the standard simulation parameters yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         """
-        Defined for Daniel Ceverino's file naming scheme.
+        Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
         fn = ("%s" % (os.path.basename(args[0])))


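As a rough illustration of the unit arithmetic `_set_units` performs above, the standalone sketch below recomputes the same factors from hypothetical header values; boxh, ng, Om0, hubble and aexpn are invented for the example and the constants mirror the ones in the diff.

import numpy as np

# Hypothetical ART header values, for illustration only.
boxh, ng = 10.0, 128            # box size [h^-1 Mpc], root cells per side
Om0, hubble, aexpn = 0.27, 0.70, 1.0

r0 = boxh/ng                                  # length unit per root cell
v0 = r0 * 50.0*1.0e5 * np.sqrt(Om0)           # velocity unit
t0 = r0/v0                                    # time unit
rho0 = 1.8791e-29 * hubble**2.0 * Om0         # density unit [g/cm^3]
aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0     # mass unit, before the Msun factor

conversion = {
    "Density":   rho0*(aexpn**-3.0),
    "GasEnergy": rho0*v0**2*(aexpn**-5.0),
    "Mass":      aM0 * 1.98892e33,
}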
diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody at ucsc.ed>
+Author: Christopher E. Moody <cemoody at ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,19 +25,128 @@
 
 """
 
-art_particle_field_names = [
-'particle_age',
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type']
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
+]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
+filename_pattern = {				
+	'amr':'10MpcBox_csf512_%s.d',
+	'particle_header':'PMcrd%s.DAT',
+	'particle_data':'PMcrs0%s.DAT',
+	'particle_stars':'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137


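The `filename_pattern` table above is what `_find_files` in data_structures.py consumes; the sketch below shows the intended matching on a hypothetical snapshot name (nothing here comes from a real dataset).

import glob, os

filename_pattern = {
    'amr': '10MpcBox_csf512_%s.d',
    'particle_header': 'PMcrd%s.DAT',
    'particle_data': 'PMcrs0%s.DAT',
    'particle_stars': 'stars_%s.dat',
}

def find_companions(file_amr):
    # Strip the AMR prefix/suffix to recover the affix (e.g. 'a0.500'),
    # then glob with '?' on either side to tolerate stray underscores.
    prefix, suffix = filename_pattern['amr'].split('%s')
    affix = os.path.basename(file_amr).replace(prefix, '').replace(suffix, '')
    affix = affix.replace('_', '')[1:-1]
    dirname = os.path.dirname(file_amr) or '.'
    found = {}
    for filetype, pattern in filename_pattern.items():
        matches = glob.glob(os.path.join(dirname, pattern % ('?%s?' % affix)))
        found[filetype] = matches[0] if len(matches) == 1 else None
    return found

# find_companions('/data/sim/10MpcBox_csf512_a0.500.d') would pick up
# PMcrda0.500.DAT, PMcrs0a0.500.DAT and stars_a0.500.dat if they are present.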
diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        #print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)


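The reworked `_temperature` field above divides the conversion factors back out of GasEnergy and Density, scales the code-unit ratio by pf.tr, and zeroes the NaNs produced by zero-density ghost cells. A minimal standalone sketch of that arithmetic follows; the array values and factors are invented for the example.

import numpy as np

def code_temperature(gas_energy, density, cf_gasenergy, cf_density, tr):
    dg = gas_energy.astype('float64') / cf_gasenergy    # back to code units
    dd = density.astype('float64') / cf_density
    with np.errstate(divide='ignore', invalid='ignore'):
        t = dg/dd * tr
    t[np.isnan(t)] = 0.0    # zero-density (ghost) cells
    return t

ge  = np.array([1.0, 2.0, 0.0])
rho = np.array([1.0, 1.0, 0.0])
code_temperature(ge, rho, cf_gasenergy=1.0, cf_density=1.0, tr=100.0)
# -> array([ 100.,  200.,    0.])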
diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -121,45 +121,19 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -198,9 +172,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +206,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +257,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +278,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +292,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +305,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -346,7 +325,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refinement info is available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk
@@ -476,3 +455,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd


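For reference, the paging arithmetic that the updated `read_particles` above relies on is simple to check by hand; the numbers below are invented for illustration.

# Each PMcrs0 page holds Nrow**2 particles, six 4-byte reals apiece,
# so the page count follows from the file size alone.
Nrow = 256                    # from the particle header
words, real_size = 6, 4       # x,y,z,vx,vy,vz as 4-byte reals
np_per_page = Nrow**2
page_bytes = real_size * words * np_per_page   # 1,572,864 bytes per page

file_size = 100 * page_bytes  # pretend the file is exactly 100 pages long
num_pages = file_size // page_bytes
assert num_pages == 100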
diff -r b1ba33518f0b7512086a211827a32da6f608e0e9 -r c94dedb96da330f49b8e825fa43a9ce6a0a65e16 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -958,12 +958,89 @@
         return info, values
 
 class StereoPairCamera(Camera):
-    def __init__(self, original_camera, relative_separation = 0.005):
+    def __init__(self,original_camera,
+                 auto_focus=False,
+                 focal_length=None,
+                 frac_near_plane = 0.90, 
+                 frac_far_plane  = 1.10,
+                 frac_eye_separation=0.05,
+                 aperture = 60.0,
+                 relative_separation=0.005):
+        """
+        Auto-focus is adapted from a guide and code at:
+        http://paulbourke.net/miscellaneous/stereographics/stereorender/
+        """
         ParallelAnalysisInterface.__init__(self)
         self.original_camera = original_camera
-        self.relative_separation = relative_separation
+        oc = self.original_camera
+        self.auto_focus = auto_focus
+        self.focal_length = focal_length
+        if self.auto_focus:
+            dist = lambda x,y: na.sqrt(na.sum((x-y)**2.0))
+            if self.focal_length is None:
+                self.focal_length = dist(oc.normal_vector,0.0)
+            self.focal_far  = oc.center + frac_far_plane*oc.normal_vector
+            self.focal_near = oc.center + frac_near_plane*oc.normal_vector
+            self.wh_ratio = oc.resolution[0]/oc.resolution[1]
+            self.eye_sep  = self.focal_length*frac_eye_separation
+            self.aperture = aperture
+            self.frac_eye_separation = frac_eye_separation
+            self.center_eye_pos = oc.center + oc.normal_vector
+        else:
+            #default to old separation
+            self.relative_separation = relative_separation
+    
+    def finalize_image(self,image):
+        if self.auto_focus:
+            #we have extra frustum pixels on the left and right
+            #cameras that need to be trimmed off
+            left_trim,right_trim = self.trim[0],self.trim[1]
+            left = abs(left_trim)
+            right = image.shape[0]-abs(right_trim)
+            image = image[left:right,:]
+        return image
+
+    def auto_split(self):
+        """We must calculate the new camera centers, as well
+        as the extended frustum pixels."""
+        oc = self.original_camera
+        nv = oc.orienter.normal_vector
+        up = oc.north_vector
+        c = oc.center
+        px = oc.resolution[0] #pixel width
+        norm = lambda x: na.sqrt(na.dot(x,x.conj()))
+        between_eyes = na.cross(nv,up)
+        between_eyes /= norm(between_eyes)
+        between_eyes *= self.eye_sep/2.0
+        le_norm = nv-between_eyes 
+        le_c= c-between_eyes 
+        re_norm = nv+between_eyes 
+        re_c = c+between_eyes 
+        angular_aperture = na.tan(self.aperture/360.0*2.0*na.pi/2.0)
+        delta = na.rint(px*self.frac_eye_separation/(2.0*(angular_aperture)))
+        delta = delta.astype('int')
+        eresolution = oc.resolution[0]+delta
+        left_camera = Camera(le_c, le_norm, oc.width,
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
+        left_camera.trim = [-delta,0]
+        right_camera = Camera(re_c, re_norm, oc.width,
+                     eresolution, oc.transfer_function, north_vector=up,
+                     volume=oc.volume, fields=oc.fields, 
+                     log_fields=oc.log_fields,
+                     sub_samples=oc.sub_samples, pf=oc.pf)
+        right_camera.trim = [0,-delta]
+        return (left_camera, right_camera)
 
     def split(self):
+        if self.auto_focus:
+            return self.auto_split()
+        else:
+            return self.default_split()
+    
+    def default_split(self):
         oc = self.original_camera
         uv = oc.orienter.unit_vectors
         c = oc.center
@@ -981,6 +1056,10 @@
                              sub_samples=oc.sub_samples, pf=oc.pf)
         return (left_camera, right_camera)
 
+
+
+        
+
 class FisheyeCamera(Camera):
     def __init__(self, center, radius, fov, resolution,
                  transfer_function = None, fields = None,


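To make the frustum widening in `auto_split` above concrete, here is the same delta calculation on assumed numbers; the resolution, aperture and eye-separation fraction are placeholders, not values from any dataset.

import numpy as np

px = 512                   # horizontal resolution of the original camera
aperture = 60.0            # degrees
frac_eye_separation = 0.05

# tan of the half-aperture, written as in auto_split
angular_aperture = np.tan(aperture/360.0*2.0*np.pi/2.0)
delta = int(np.rint(px*frac_eye_separation/(2.0*angular_aperture)))
eresolution = px + delta   # each eye renders delta extra columns to trim later

# delta == 22 and eresolution == 534 for these numbers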

https://bitbucket.org/yt_analysis/yt/changeset/d965c7412bdd/
changeset:   d965c7412bdd
branch:      yt
user:        juxtaposicion
date:        2012-11-26 23:36:35
summary:     cleaned up data_structures
affected #:  1 file

diff -r f7edfc242413e104df97fe7365f52814e43fca82 -r d965c7412bddea5ddb973514e2527cf89fec3412 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -18,17 +20,16 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -42,64 +43,65 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index = start_index
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +111,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -124,381 +127,277 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
+        if not self.pf.skip_particles:
+            self._setup_particle_grids()
         self._setup_field_list()
         
+    def _setup_particle_grids(self):
+        pass
+    
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches may be mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
+            um  = self.pf.conversion_factors['Mass'] #mass units in g
+            uv  = self.pf.conversion_factors['Velocity'] #velocity units in cm/s
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float32')
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                if i == self.pf.particle_star_index:
+                    sa,sb = a,b
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m*um #mass in grams
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        setattr(self.pf,psf,
+                                np.zeros(nparticles,dtype='float32'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,53 +407,57 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
@@ -580,97 +484,67 @@
     _hierarchy_class = ARTHierarchy
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
-    _handle = None
     
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 use_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
         self.storage_filename = storage_filename
-        
-        
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical units based
+        on the parameters from the header.
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
         aexpn = self["aexpn"]
@@ -679,269 +553,111 @@
             self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        t0 = r0/v0
+        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+
+        #factors to multiply the native code units to CGS
+        cf = defaultdict(lambda: 1.0)
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
         
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
         for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
+            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
 
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
     def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
         header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = np.diff(lspecies)[:n-1]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *ls_nonzero)
+            self.parameters.update(particle_header_vals)
+    
+        #set up the standard simulation parameters yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         """
-        Defined for Daniel Ceverino's file naming scheme.
+        Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
         fn = ("%s" % (os.path.basename(args[0])))


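The reworked _parse_hierarchy above drops the old per-level `correction` factor: each
proto-subgrid's integer properties are divided by the number of cells across the box at
that level, and level 0 is special-cased to span the whole unit domain. A minimal sketch
of that edge computation (plain numpy, made-up values, not yt code):

    import numpy as np

    domain_dimensions = np.array([128, 128, 128], dtype='int64')  # root grid cells
    level = 2
    # props rows: left index, right index, dimensions, in cell units on this level
    props = np.array([[256, 256, 256],
                      [320, 320, 320],
                      [ 64,  64,  64]], dtype='int64')   # hypothetical values

    dds = ((2**level) * domain_dimensions).astype('float64')  # cells across the box
    left_edge  = props[0].astype('float64') / dds   # -> [0.5, 0.5, 0.5]
    right_edge = props[1].astype('float64') / dds   # -> [0.625, 0.625, 0.625]
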

https://bitbucket.org/yt_analysis/yt/changeset/572b60dd7353/
changeset:   572b60dd7353
branch:      yt
user:        juxtaposicion
date:        2012-11-26 23:37:08
summary:     cleaned up the new IO
affected #:  1 file

diff -r d965c7412bddea5ddb973514e2527cf89fec3412 -r 572b60dd73537ff6860dccff35220f2ff20e54d8 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -121,45 +121,19 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -198,9 +172,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +206,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +257,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +278,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +292,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +305,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -346,7 +325,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk
@@ -476,3 +455,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd


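The new spread_ages helper smooths out the lumpy birth times ART writes for stars formed
in the same burst: each run of identical ages is replaced by a linearly spaced ramp no
wider than `spread` seconds (1e7 yr by default). A minimal usage sketch, assuming the
helper is imported from yt.frontends.art.io and using made-up ages:

    import numpy as np
    from yt.frontends.art.io import spread_ages  # assumed import location

    sec_per_Myr = 1.0e6 * 365 * 24 * 3600
    # ages must be monotonically non-increasing, as the assert in spread_ages requires
    ages = np.array([500.0, 500.0, 500.0, 300.0, 300.0, 100.0]) * sec_per_Myr
    smoothed = spread_ages(ages, spread=10.0 * sec_per_Myr)
    # each lump of identical ages comes back as a short linear ramp instead
    print(smoothed.shape)  # same shape as the input
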

https://bitbucket.org/yt_analysis/yt/changeset/7f3606d458da/
changeset:   7f3606d458da
branch:      yt
user:        juxtaposicion
date:        2012-11-26 23:37:31
summary:     moved ART constants and file structures to definition
affected #:  1 file

diff -r 572b60dd73537ff6860dccff35220f2ff20e54d8 -r 7f3606d458da0c4e78b703a34c71f3abb020279e yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody at ucsc.ed>
+Author: Christopher E. Moody <cemoody at ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,19 +25,128 @@
 
 """
 
-art_particle_field_names = [
-'particle_age',
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type']
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
+]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
+filename_pattern = {				
+	'amr':'10MpcBox_csf512_%s.d',
+	'particle_header':'PMcrd%s.DAT',
+	'particle_data':'PMcrs0%s.DAT',
+	'particle_stars':'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137
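
Each entry in the header structures above pairs a (mostly big-endian) struct format code with a field name. A minimal sketch of how such a list could be unpacked from a binary header; the function and file-handle names here are illustrative, not part of the frontend:

    import struct

    def read_header_sketch(fh, header_struct):
        # unpack (format, name) pairs sequentially from an open binary file;
        # repeated names such as 'pad byte' simply overwrite one another
        header = {}
        for fmt, name in header_struct:
            nbytes = struct.calcsize(fmt)
            header[name] = struct.unpack(fmt, fh.read(nbytes))[0]
        return header

    # with open(amr_filename, 'rb') as fh:   # amr_filename is illustrative
    #     header = read_header_sketch(fh, amr_header_struct)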



https://bitbucket.org/yt_analysis/yt/changeset/085a37148950/
changeset:   085a37148950
branch:      yt
user:        juxtaposicion
date:        2012-11-26 23:51:54
summary:     added **kwargs to time series; gets passed to load()
affected #:  1 file

diff -r 7f3606d458da0c4e78b703a34c71f3abb020279e -r 085a37148950b9286bea51660a61e47d946589e3 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -78,7 +78,7 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs, parallel = True):
+    def __init__(self, outputs, parallel = True, **kwargs):
         r"""The TimeSeriesData object is a container of multiple datasets,
         allowing easy iteration and computation on them.
 
@@ -107,12 +107,13 @@
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
         self.parallel = parallel
+        self.kwargs = kwargs
 
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, types.StringTypes):
-                yield load(o)
+                yield load(o,**self.kwargs)
             else:
                 yield o
 
@@ -124,7 +125,7 @@
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, types.StringTypes):
-            o = load(o)
+            o = load(o,**self.kwargs)
         return o
 
     def __len__(self):
@@ -223,7 +224,7 @@
         return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filenames, parallel = True):
+    def from_filenames(cls, filenames, parallel = True, **kwargs):
         r"""Create a time series from either a filename pattern or a list of
         filenames.
 
@@ -258,12 +259,9 @@
 
         """
         if isinstance(filenames, types.StringTypes):
-            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
-            if len(filenames) == 0:
-                raise YTNoFilenamesMatchPattern(pattern)
-        obj = cls(filenames[:], parallel = parallel)
+        obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 
     @classmethod
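
With this change, keyword arguments given to TimeSeriesData (or to from_filenames) are stored and forwarded to load() each time an output in the series is instantiated. A short usage sketch; the file pattern and the skip_particles flag are illustrative:

    from yt.mods import *

    # any extra keywords are passed through to load() for every output
    ts = TimeSeriesData.from_filenames("DD????/DD????", parallel=False,
                                       skip_particles=True)
    for pf in ts:
        print(pf.current_time)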



https://bitbucket.org/yt_analysis/yt/changeset/0195527b3385/
changeset:   0195527b3385
branch:      yt
user:        juxtaposicion
date:        2012-11-27 00:01:49
summary:     Merge
affected #:  2 files

diff -r 085a37148950b9286bea51660a61e47d946589e3 -r 0195527b33856ba897b40a7b012b878d123db90f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -206,11 +206,11 @@
     os.chdir(oldcwd)
 
 def can_run_pf(pf_fn):
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput):
-        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)


diff -r 085a37148950b9286bea51660a61e47d946589e3 -r 0195527b33856ba897b40a7b012b878d123db90f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -307,14 +307,17 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None):
         """
         annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cuttoff for display is at *min_pix* wide.
         *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
-        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.
+        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.  If *min_level* 
+        is specified, only draw grids at or above min_level.  If *max_level* is specified, only 
+        draw grids at or below max_level.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
@@ -322,6 +325,8 @@
         self.min_pix_ids = min_pix_ids
         self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
+        self.min_level = min_level
+        self.max_level = max_level
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -341,6 +346,14 @@
             pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
+        grid_levels = plot.data.grid_levels
+        min_level = self.min_level
+        max_level = self.max_level
+        if min_level is None:
+            min_level = 0
+        if max_level is None:
+            max_level = plot.data.max_level
+
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
@@ -349,7 +362,9 @@
             right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
             right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
-                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
+                       ( grid_levels >= min_level) & \
+                       ( grid_levels <= max_level)
             if visible.nonzero()[0].size == 0: continue
             verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),



https://bitbucket.org/yt_analysis/yt/changeset/e6840f9099dd/
changeset:   e6840f9099dd
branch:      yt
user:        juxtaposicion
date:        2012-11-27 00:09:49
summary:     removed the empty particle grid setup
affected #:  1 file

diff -r 0195527b33856ba897b40a7b012b878d123db90f -r e6840f9099ddafc9528f437f20800196e2d874e1 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -148,12 +148,8 @@
         AMRHierarchy.__init__(self,pf,data_style)
         if not self.pf.skip_particles:
             self._setup_particle_grids()
-        self._setup_particle_grids()
         self._setup_field_list()
-        
-    def _setup_particle_grids(self):
-        pass
-    
+
     def _initialize_data_storage(self):
         pass
     



https://bitbucket.org/yt_analysis/yt/changeset/45d9b67f03da/
changeset:   45d9b67f03da
branch:      yt
user:        juxtaposicion
date:        2012-11-27 00:24:17
summary:     Merge
affected #:  10 files

diff -r e6840f9099ddafc9528f437f20800196e2d874e1 -r 45d9b67f03daf4bec6ee21870dcad916993fb976 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,6 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
\ No newline at end of file
+with-xunit=1
+#with-answer-testing=1
+#answer-compare=gold001


diff -r e6840f9099ddafc9528f437f20800196e2d874e1 -r 45d9b67f03daf4bec6ee21870dcad916993fb976 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,141 +26,23 @@
 from yt.mods import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
+
 from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-from yt.config import ytcfg
-
 import rockstar_interface
-
 import socket
 import time
-import threading
-import signal
-import os
-from os import environ
-from os import mkdir
-from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
 
-class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
-        # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
-    def run(self, handler, pool):
-        # If inline, we use forks.
-        server_pid = 0
-        # Start a server on only one machine/fork.
-        if pool.comm.rank == 0:
-            server_pid = os.fork()
-            if server_pid == 0:
-                handler.start_server()
-                os._exit(0)
-        # Start writers.
-        writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        # Make sure the forks are done, which they should be.
-        if writer_pid != 0:
-            os.waitpid(writer_pid, 0)
-        if server_pid != 0:
-            os.waitpid(server_pid, 0)
-
-class StandardRunner(ParallelAnalysisInterface):
-    def __init__(self, num_readers, num_writers):
-        self.num_readers = num_readers
-        if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
-            mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
-            raise RuntimeError
-    
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
-    
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
@@ -183,30 +65,23 @@
             The number of reader can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
+            IO-limited. Default is 1.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
+            The default is set to comm.size-num_readers-1.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
+            placed. Default is str(pf)+'_rockstar'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-            
+        force_res: None
+            The default force resolution is 0.0012 comoving Mpc/h;
+            setting this overrides Rockstar's default.
+
         Returns
         -------
         None
@@ -218,6 +93,7 @@
 
         test_rockstar.py:
 
+        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
@@ -229,64 +105,50 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        # Decide how we're working.
-        if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
-        else:
-            self.runner = StandardRunner(num_readers, num_writers)
-        self.num_readers = self.runner.num_readers
-        self.num_writers = self.runner.num_writers
-        mylog.info("Rockstar is using %d readers and %d writers",
-            self.num_readers, self.num_writers)
-        # Note that Rockstar does not support subvolumes.
-        # We assume that all of the snapshots in the time series
-        # use the same domain info as the first snapshots.
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshots
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field, data):
-            try:
-                return (data["particle_type"]==dm_type).sum()
-            except KeyError:
-                return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            #we need readers+writers+1 server = comm size        
+            raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
+        data_source = tpf.h.all_data()
+        self.comm.barrier()
+        self.force_res = force_res
+        def _pcount(field,data):
+            return (data["particle_type"]==dm_type).sum()
+        add_field("pcount",function=_pcount,particle_type=True)
+        total_particles = dd.quantities['TotalQuantity']('pcount')
+        self.total_particles = total_particles
+        mylog.info("Found %i halo particles",total_particles)
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+                self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.size == 1 or self.workgroup.name == "server":
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +156,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -302,13 +164,21 @@
         """
         
         """
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            mylog.debug("Num Writers = %s Num Readers = %s",
+                        self.num_writers, self.num_readers)
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(self.num_readers, name = "readers")
+            self.pool.add_workgroup(self.num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, 
-                    self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    len(self.ts), self.total_particles, self.dm_type,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
@@ -317,29 +187,27 @@
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-            # Make a record of which dataset corresponds to which set of
-            # output files because it will be easy to lose this connection.
-            fp = open(self.outbase + '/pfs.txt', 'w')
-            fp.write("# pfname\tindex\n")
-            for i, pf in enumerate(self.ts):
-                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
-                line = "%s\t%d\n" % (pfloc, i)
-                fp.write(line)
-            fp.close()
-        # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
-            # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            self.pool.free_all()
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
@@ -347,4 +215,5 @@
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+        tpf = self.ts[0]
+        return RockstarHaloList(tpf,file_name)
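
In this version the pool is one "server" workgroup plus num_readers readers plus num_writers writers, and their total must equal the MPI communicator size. A hedged launch sketch under those assumptions; the file pattern and particle mass are illustrative:

    # run under MPI, e.g.:  mpirun -np 4 python run_rockstar.py --parallel
    from mpi4py import MPI
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
    from yt.mods import *

    ts = TimeSeriesData.from_filenames("DD????/DD????")
    # 1 server + 1 reader + 2 writers == 4 MPI tasks
    rh = RockstarHaloFinder(ts, num_readers=1, num_writers=2,
                            particle_mass=1.0e9)
    rh.run()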


diff -r e6840f9099ddafc9528f437f20800196e2d874e1 -r 45d9b67f03daf4bec6ee21870dcad916993fb976 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,8 +29,6 @@
 cimport cython
 from libc.stdlib cimport malloc
 
-from yt.config import ytcfg
-
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -46,11 +44,11 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h" nogil:
+cdef import from "server.h":
     int server()
 
-cdef import from "client.h" nogil:
-    void client(np.int64_t in_type)
+cdef import from "client.h":
+    void client()
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
@@ -239,54 +237,26 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
-    global SCALE_NOW
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    global SCALE_NOW, TOTAL_PARTICLES
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    
+
+    # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-
-    all_grids = pf.h.grids
+    dd = pf.h.all_data()
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
-    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
-
-    # First we need to find out how many this reader is going to read in
-    # if the number of readers > 1.
-    if NUM_BLOCKS > 1:
-        local_parts = 0
-        for g in grids:
-            if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-                iddm = g["particle_type"] == rh.dm_type
-            else:
-                iddm = Ellipsis
-            arri = g["particle_index"].astype("int64")
-            arri = arri[iddm] #pick only DM
-            local_parts += arri.size
-    else:
-        local_parts = TOTAL_PARTICLES
-
-    #print "local_parts", local_parts
-
-    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
-
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
@@ -295,12 +265,8 @@
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
-        arri = g["particle_index"].astype("int64")
+        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
@@ -310,13 +276,22 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = g[field].astype("float64")
+            arr = dd._get_data_from_grid(g, field).astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = local_parts
+    num_p[0] = tnpart
+    TOTAL_PARTICLES = tnpart
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
 
 cdef class RockstarInterface:
 
@@ -348,10 +323,10 @@
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        global OVERLAP_LENGTH, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            #print "set force res to ",FORCE_RES
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
@@ -392,7 +367,6 @@
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
-        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
@@ -402,9 +376,7 @@
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        with nogil:
-            server()
+        server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
-        client(in_type)
+    def start_client(self):
+        client()
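
Illustrative only: the reader-side grid selection in rh_read_particles above reduces to an np.array_split over the grid list, indexed by the block number parsed from the particle filename. A tiny standalone sketch:

    import numpy as np

    all_grids = np.arange(17)          # stand-in for the dataset's grid list
    NUM_BLOCKS = 4                     # number of reader blocks
    for block in range(NUM_BLOCKS):
        # each reader takes one contiguous share of the grid list
        grids = np.array_split(all_grids, NUM_BLOCKS)[block]
        print(block, len(grids))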


diff -r e6840f9099ddafc9528f437f20800196e2d874e1 -r 45d9b67f03daf4bec6ee21870dcad916993fb976 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #child grid index, -1 for a leaf
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,np.sum(idx)
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
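
With this change export_to_sunrise also returns the number of exported star particles. A hedged call sketch; the snapshot name, the star particle type, and the region are illustrative, not defaults:

    from yt.mods import *
    from yt.analysis_modules.sunrise_export.sunrise_exporter import export_to_sunrise

    pf = load("10MpcBox_csf512_a0.300.d")     # illustrative ART snapshot
    fc = [0.5, 0.5, 0.5]                      # region center, code units
    fwidth = [0.05, 0.05, 0.05]               # region width, code units
    fle, fre, ile, ire, dd, nleaf, nstars = export_to_sunrise(
        pf, "sunrise_export.fits", star_particle_type=5, fc=fc, fwidth=fwidth)
    print('exported %i star particles' % nstars)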








diff -r e6840f9099ddafc9528f437f20800196e2d874e1 -r 45d9b67f03daf4bec6ee21870dcad916993fb976 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
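
For orientation, a minimal usage sketch of the derived fields registered above, assuming a hypothetical ART output filename; the add_field signature, the field names, and the pf.h.all_data() call are exactly the ones used in the diff:

    from yt.mods import *
    pf = load("10MpcBox_csf512_a0.300.d")   # hypothetical ART output file
    dd = pf.h.all_data()
    # x-velocity is evaluated on demand as XMomentumDensity / Density and is
    # registered with cm/s units; ParticleMassMsun works the same way for
    # particle data.
    print dd["x-velocity"].min(), dd["x-velocity"].max()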







https://bitbucket.org/yt_analysis/yt/changeset/d45af85d7404/
changeset:   d45af85d7404
branch:      yt
user:        juxtaposicion
date:        2012-11-27 00:35:12
summary:     reverting rockstar to stephen's commits.
reverting setup.cfg
affected #:  3 files

diff -r 45d9b67f03daf4bec6ee21870dcad916993fb976 -r d45af85d740408f353dd3d088b1edfc5c21d92e3 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,6 +6,4 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
-#with-answer-testing=1
-#answer-compare=gold001
+with-xunit=1
\ No newline at end of file


diff -r 45d9b67f03daf4bec6ee21870dcad916993fb976 -r d45af85d740408f353dd3d088b1edfc5c21d92e3 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,23 +26,141 @@
 from yt.mods import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
+from yt.config import ytcfg
 
-from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
 import rockstar_interface
+
 import socket
 import time
+import threading
+import signal
+import os
+from os import environ
+from os import mkdir
+from os import path
 
-class DomainDecomposer(ParallelAnalysisInterface):
-    def __init__(self, pf, comm):
-        ParallelAnalysisInterface.__init__(self, comm=comm)
-        self.pf = pf
-        self.hierarchy = pf.h
-        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
+# Get some definitions from Rockstar directly.
+if "ROCKSTAR_DIR" in os.environ:
+    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
+elif os.path.exists("rockstar.cfg"):
+    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
+else:
+    print "Reading Rockstar location from rockstar.cfg failed."
+    print "Please place the base directory of your"
+    print "Rockstar install in rockstar.cfg and restart."
+    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+    sys.exit(1)
+lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
+READER_TYPE = None
+WRITER_TYPE = None
+for line in lines:
+    if "READER_TYPE" in line:
+        line = line.split()
+        READER_TYPE = int(line[-1])
+    if "WRITER_TYPE" in line:
+        line = line.split()
+        WRITER_TYPE = int(line[-1])
+    if READER_TYPE != None and WRITER_TYPE != None:
+        break
+lines.close()
 
-    def decompose(self):
-        dd = self.pf.h.all_data()
-        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
-        return data_source
+class InlineRunner(ParallelAnalysisInterface):
+    def __init__(self, num_writers):
+        # If this is being run inline, num_readers == comm.size, always.
+        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
+        if num_writers is None:
+            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
+        else:
+            self.num_writers = min(num_writers,
+                ytcfg.getint("yt", "__global_parallel_size"))
+
+    def split_work(self, pool):
+        avail = range(pool.comm.size)
+        self.writers = []
+        self.readers = []
+        # If we're inline, everyone is a reader.
+        self.readers = avail[:]
+        if self.num_writers == pool.comm.size:
+            # And everyone is a writer!
+            self.writers = avail[:]
+        else:
+            # Everyone is not a writer.
+            # Cyclically assign writers which should approximate
+            # memory load balancing (depending on the mpirun call,
+            # but this should do it in most cases).
+            stride = int(ceil(float(pool.comm.size) / self.num_writers))
+            while len(self.writers) < self.num_writers:
+                self.writers.extend(avail[::stride])
+                for r in readers:
+                    avail.pop(avail.index(r))
+
+    def run(self, handler, pool):
+        # If inline, we use forks.
+        server_pid = 0
+        # Start a server on only one machine/fork.
+        if pool.comm.rank == 0:
+            server_pid = os.fork()
+            if server_pid == 0:
+                handler.start_server()
+                os._exit(0)
+        # Start writers.
+        writer_pid = 0
+        if pool.comm.rank in self.writers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            writer_pid = os.fork()
+            if writer_pid == 0:
+                handler.start_client(WRITER_TYPE)
+                os._exit(0)
+        # Start readers, not forked.
+        if pool.comm.rank in self.readers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            handler.start_client(READER_TYPE)
+        # Make sure the forks are done, which they should be.
+        if writer_pid != 0:
+            os.waitpid(writer_pid, 0)
+        if server_pid != 0:
+            os.waitpid(server_pid, 0)
+
+class StandardRunner(ParallelAnalysisInterface):
+    def __init__(self, num_readers, num_writers):
+        self.num_readers = num_readers
+        if num_writers is None:
+            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
+                - num_readers - 1
+        else:
+            self.num_writers = min(num_writers,
+                ytcfg.getint("yt", "__global_parallel_size"))
+        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
+                "__global_parallel_size"):
+            mylog.error('%i reader + %i writers != %i mpi',
+                    self.num_readers, self.num_writers,
+                    ytcfg.getint("yt", "__global_parallel_size"))
+            raise RuntimeError
+    
+    def split_work(self, pool):
+        # Who is going to do what.
+        avail = range(pool.comm.size)
+        self.writers = []
+        self.readers = []
+        # If we're not running inline, rank 0 should be removed immediately.
+        avail.pop(0)
+        # Now we assign the rest.
+        for i in range(self.num_readers):
+            self.readers.append(avail.pop(0))
+        for i in range(self.num_writers):
+            self.writers.append(avail.pop(0))
+    
+    def run(self, handler, pool):
+        # Not inline so we just launch them directly from our MPI threads.
+        if pool.comm.rank == 0:
+            handler.start_server()
+        if pool.comm.rank in self.readers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            handler.start_client(READER_TYPE)
+        if pool.comm.rank in self.writers:
+            time.sleep(0.2 + pool.comm.rank/10.0)
+            handler.start_client(WRITER_TYPE)
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
@@ -65,23 +183,30 @@
             The number of readers can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1.
+            IO-limited. Default is 1. If run inline, it is
+            equal to the number of MPI threads.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set comm.size-num_readers-1.
+            The default is set to comm.size-num_readers-1. If run inline,
+            the default is equal to the number of MPI threads.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is str(pf)+'_rockstar'.
+            placed. Default is 'rockstar_halos'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: None
-            The default force resolution is 0.0012 comoving Mpc/H
-            This overrides Rockstars' defaults
-
+        force_res: float
+            This parameter specifies the force resolution that Rockstar uses
+            in units of Mpc/h.
+            If no value is provided, this parameter is automatically set to
+            the width of the smallest grid element in the simulation from the
+            last data snapshot (i.e. the one where time has evolved the
+            longest) in the time series:
+            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+            
         Returns
         -------
         None
@@ -93,7 +218,6 @@
 
         test_rockstar.py:
 
-        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
@@ -105,50 +229,64 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        ParallelAnalysisInterface.__init__(self)
-        # No subvolume support
-        #we assume that all of the snapshots in the time series
-        #use the same domain info as the first snapshots
+        # Decide how we're working.
+        if ytcfg.getboolean("yt", "inline") == True:
+            self.runner = InlineRunner(num_writers)
+        else:
+            self.runner = StandardRunner(num_readers, num_writers)
+        self.num_readers = self.runner.num_readers
+        self.num_writers = self.runner.num_writers
+        mylog.info("Rockstar is using %d readers and %d writers",
+            self.num_readers, self.num_writers)
+        # Note that Rockstar does not support subvolumes.
+        # We assume that all of the snapshots in the time series
+        # use the same domain info as the first snapshots.
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
-        if self.comm.size > 1: 
-            self.comm.barrier()            
         tpf = ts.__iter__().next()
+        def _particle_count(field, data):
+            try:
+                return (data["particle_type"]==dm_type).sum()
+            except KeyError:
+                return np.prod(data["particle_position_x"].shape)
+        add_field("particle_count",function=_particle_count, not_in_all=True,
+            particle_type=True)
+        # Get total_particles in parallel.
         dd = tpf.h.all_data()
+        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = str(tpf)+'_rockstar'
-        self.outbase = outbase        
-        if num_writers is None:
-            num_writers = self.comm.size - num_readers -1
-        self.num_readers = num_readers
-        self.num_writers = num_writers
-        if self.num_readers + self.num_writers + 1 != self.comm.size:
-            #we need readers+writers+1 server = comm size        
-            raise RuntimeError
+            outbase = 'rockstar_halos'
+        self.outbase = outbase
+        self.particle_mass = particle_mass
+        if force_res is None:
+            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
+        else:
+            self.force_res = force_res
+        self.left_edge = tpf.domain_left_edge
+        self.right_edge = tpf.domain_right_edge
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        data_source = tpf.h.all_data()
-        self.comm.barrier()
-        self.force_res = force_res
-        def _pcount(field,data):
-            return (data["particle_type"]=dm_type).sum()
-        add_field("pcount",function=_pcount,particle_type=True)
-        total_particles = dd.quantities['TotalQuantity']('pcount')
-        self.total_particles = total_particles
-        mylog.info("Found %i halo particles",total_particles)
+        # We set up the workgroups *before* initializing
+        # ParallelAnalysisInterface. Everyone is their own workgroup!
+        self.pool = ProcessorPool()
+        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
+             self.pool.add_workgroup(size=1)
+        ParallelAnalysisInterface.__init__(self)
+        for wg in self.pool.workgroups:
+            if self.pool.comm.rank in wg.ranks:
+                self.workgroup = wg
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, data_source)
+                self.ts, dd)
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.comm.size == 1 or self.workgroup.name == "server":
+        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -156,7 +294,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.comm.mpi_bcast(
+        self.server_address, self.port = self.pool.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -164,21 +302,13 @@
         """
         
         """
-        if self.comm.size > 1:
-            self.pool = ProcessorPool()
-            mylog.debug("Num Writers = %s Num Readers = %s",
-                        self.num_writers, self.num_readers)
-            self.pool.add_workgroup(1, name = "server")
-            self.pool.add_workgroup(self.num_readers, name = "readers")
-            self.pool.add_workgroup(self.num_writers, name = "writers")
-            for wg in self.pool.workgroups:
-                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, self.dm_type,
-                    parallel = self.comm.size > 1,
+                    len(self.ts), self.total_particles, 
+                    self.dm_type,
+                    parallel = self.pool.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
@@ -187,27 +317,29 @@
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        #because rockstar *always* write to exactly the same
-        #out_0.list filename we make a directory for it
-        #to sit inside so it doesn't get accidentally
-        #overwritten 
-        if self.workgroup.name == "server":
+        # Make the directory to store the halo lists in.
+        if self.pool.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-        if self.comm.size == 1:
+            # Make a record of which dataset corresponds to which set of
+            # output files because it will be easy to lose this connection.
+            fp = open(self.outbase + '/pfs.txt', 'w')
+            fp.write("# pfname\tindex\n")
+            for i, pf in enumerate(self.ts):
+                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
+                line = "%s\t%d\n" % (pfloc, i)
+                fp.write(line)
+            fp.close()
+        # This barrier makes sure the directory exists before it might be used.
+        self.pool.comm.barrier()
+        if self.pool.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            self.comm.barrier()
-            if self.workgroup.name == "server":
-                self.handler.start_server()
-            elif self.workgroup.name == "readers":
-                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            elif self.workgroup.name == "writers":
-                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            self.pool.free_all()
-        self.comm.barrier()
+            # Split up the work.
+            self.runner.split_work(self.pool)
+            # And run it!
+            self.runner.run(self.handler, self.pool)
+        self.pool.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
@@ -215,5 +347,4 @@
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        tpf = self.ts[0]
-        return RockstarHaloList(tpf,file_name)
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
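
The InlineRunner/StandardRunner classes above describe their rank layout only in comments; below is a hedged, standalone sketch of that layout (hypothetical helper names, plain Python, no MPI or yt imports), showing the server/reader/writer split in standard mode and the strided writer selection that the "cyclically assign writers" comment refers to in inline mode:

    import math

    def split_ranks_standard(comm_size, num_readers, num_writers):
        # StandardRunner layout: rank 0 is the server, the next num_readers
        # ranks read, the remaining ranks write (the constructor above
        # enforces that the three add up to comm_size).
        avail = list(range(comm_size))
        server = avail.pop(0)
        readers = [avail.pop(0) for _ in range(num_readers)]
        writers = [avail.pop(0) for _ in range(num_writers)]
        return server, readers, writers

    def split_ranks_inline(comm_size, num_writers):
        # InlineRunner layout: every rank reads; writers are chosen with a
        # stride so they spread across nodes, roughly balancing memory load.
        readers = list(range(comm_size))
        stride = int(math.ceil(float(comm_size) / num_writers))
        writers = readers[::stride][:num_writers]
        return readers, writers

    print split_ranks_standard(8, 3, 4)   # (0, [1, 2, 3], [4, 5, 6, 7])
    print split_ranks_inline(8, 4)        # ([0, 1, ..., 7], [0, 2, 4, 6])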


diff -r 45d9b67f03daf4bec6ee21870dcad916993fb976 -r d45af85d740408f353dd3d088b1edfc5c21d92e3 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,6 +29,8 @@
 cimport cython
 from libc.stdlib cimport malloc
 
+from yt.config import ytcfg
+
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -44,11 +46,11 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h":
+cdef import from "server.h" nogil:
     int server()
 
-cdef import from "client.h":
-    void client()
+cdef import from "client.h" nogil:
+    void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
@@ -237,26 +239,54 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    global SCALE_NOW, TOTAL_PARTICLES
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
+    global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    n = rh.block_ratio
+
+    all_grids = pf.h.grids
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+    # Now we want to grab data from only a subset of the grids for each reader.
+    if NUM_BLOCKS == 1:
+        grids = all_grids
+    else:
+        if ytcfg.getboolean("yt", "inline") == False:
+            fnames = np.array([g.filename for g in all_grids])
+            sort = fnames.argsort()
+            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
+        else:
+            # We must be inline; grab only the local grids.
+            grids  = [g for g in all_grids if g.proc_num ==
+                          ytcfg.getint('yt','__topcomm_parallel_rank')]
     
+    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
-    # Now we want to grab data from only a subset of the grids.
-    n = rh.block_ratio
-    dd = pf.h.all_data()
-    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
-    tnpart = 0
-    for g in grids:
-        tnpart += np.sum(dd._get_data_from_grid(g, "particle_type")==rh.dm_type)
-    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
-    #print "Loading indices: size = ", tnpart
+    # First we need to find out how many this reader is going to read in
+    # if the number of readers > 1.
+    if NUM_BLOCKS > 1:
+        local_parts = 0
+        for g in grids:
+            if g.NumberOfParticles == 0: continue
+            if "particle_type" in all_fields:
+                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+                iddm = g["particle_type"] == rh.dm_type
+            else:
+                iddm = Ellipsis
+            arri = g["particle_index"].astype("int64")
+            arri = arri[iddm] #pick only DM
+            local_parts += arri.size
+    else:
+        local_parts = TOTAL_PARTICLES
+
+    #print "local_parts", local_parts
+
+    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
+
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
@@ -265,8 +295,12 @@
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        if g.NumberOfParticles == 0: continue
+        if "particle_type" in all_fields:
+            iddm = g["particle_type"] == rh.dm_type
+        else:
+            iddm = Ellipsis
+        arri = g["particle_index"].astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
@@ -276,22 +310,13 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = g[field].astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = tnpart
-    TOTAL_PARTICLES = tnpart
-    #print 'first particle coordinates'
-    #for i in range(3):
-    #    print p[0][0].pos[i],
-    #print ""
-    #print 'last particle coordinates'
-    #for i in range(3):
-    #    print p[0][tnpart-1].pos[i],
-    #print ""
+    num_p[0] = local_parts
 
 cdef class RockstarInterface:
 
@@ -323,10 +348,10 @@
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, FORCE_RES
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            print "set force res to ",FORCE_RES
+            #print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
@@ -367,6 +392,7 @@
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
+        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
@@ -376,7 +402,9 @@
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        server()
+        with nogil:
+            server()
 
-    def start_client(self):
-        client()
+    def start_client(self, in_type):
+        in_type = np.int64(in_type)
+        client(in_type)
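
The reader-side change above hands each reader a contiguous block of grids after sorting them by the file they are stored in; a small standalone illustration of that np.array_split pattern, using made-up filenames in place of grid objects:

    import numpy as np

    fnames = np.array(["d0030.z", "d0010.a", "d0020.b", "d0040.c", "d0015.a"])
    sort = fnames.argsort()        # group grids that live in the same file
    num_blocks = 2                 # plays the role of NUM_BLOCKS (readers)
    for block in range(num_blocks):
        chunk = np.array_split(fnames[sort], num_blocks)[block]
        print "reader", block, "reads", list(chunk)
    # reader 0 gets the first three sorted names, reader 1 the remaining two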



https://bitbucket.org/yt_analysis/yt/changeset/fdc4c1e77d6c/
changeset:   fdc4c1e77d6c
branch:      yt
user:        Christopher Moody
date:        2012-11-27 05:51:01
summary:     finished removing references to _setup_particle_grids
affected #:  1 file

diff -r d45af85d740408f353dd3d088b1edfc5c21d92e3 -r fdc4c1e77d6c10ffce7ff768b734c1163a0e2024 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -146,8 +146,6 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
-        if not self.pf.skip_particles:
-            self._setup_particle_grids()
         self._setup_field_list()
 
     def _initialize_data_storage(self):



https://bitbucket.org/yt_analysis/yt/changeset/01662d3efcd3/
changeset:   01662d3efcd3
branch:      yt
user:        Christopher Moody
date:        2012-11-27 06:05:11
summary:     adding particle_index field
affected #:  1 file

diff -r fdc4c1e77d6c10ffce7ff768b734c1163a0e2024 -r 01662d3efcd365c5477bb939ee376164622c982e yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -444,6 +444,7 @@
                                     particle_field)
                         g.NumberOfParticles = source.shape[0]
                         setattr(g,particle_field,source)
+                g.particle_index = np.arange(g.NumberOfParticles)
         pb.finish()
         self.max_level = self.grid_levels.max()
 



https://bitbucket.org/yt_analysis/yt/changeset/bd0ac35d628e/
changeset:   bd0ac35d628e
branch:      yt
user:        MatthewTurk
date:        2012-11-27 11:17:43
summary:     Merged in juxtaposicion/yt-dev (pull request #247)
affected #:  10 files







diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #child grid index; -1 where there is no child
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert na.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,na.sum(idx)
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
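
The octree changes above tighten the leaf/oct bookkeeping: refined[] now records True for a subdivided cell and False for a leaf, and only leaves write hydro data into output. A hedged toy illustration of that convention (plain depth-first order over a made-up tree, rather than the Hilbert ordering and eight-way splits the real code uses):

    def walk(cell, refined, output):
        # refined gets one entry per visited cell: True = subdivided (an oct),
        # False = leaf; only leaves contribute a value to output.
        is_leaf = cell["children"] is None
        refined.append(not is_leaf)
        if is_leaf:
            output.append(cell["value"])
        else:
            for child in cell["children"]:
                walk(child, refined, output)

    leaf = lambda v: {"value": v, "children": None}
    root = {"value": None,
            "children": [leaf(1), leaf(2),
                         {"value": None, "children": [leaf(3), leaf(4)]},
                         leaf(5)]}
    refined, output = [], []
    walk(root, refined, output)
    print refined   # [True, False, False, True, False, False, False]
    print output    # [1, 2, 3, 4, 5]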


diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -78,7 +78,7 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs, parallel = True):
+    def __init__(self, outputs, parallel = True ,**kwargs):
         r"""The TimeSeriesData object is a container of multiple datasets,
         allowing easy iteration and computation on them.
 
@@ -107,12 +107,13 @@
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
         self.parallel = parallel
+        self.kwargs = kwargs
 
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, types.StringTypes):
-                yield load(o)
+                yield load(o,**self.kwargs)
             else:
                 yield o
 
@@ -124,7 +125,7 @@
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, types.StringTypes):
-            o = load(o)
+            o = load(o,**self.kwargs)
         return o
 
     def __len__(self):
@@ -223,7 +224,7 @@
         return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filenames, parallel = True):
+    def from_filenames(cls, filenames, parallel = True, **kwargs):
         r"""Create a time series from either a filename pattern or a list of
         filenames.
 
@@ -258,12 +259,9 @@
 
         """
         if isinstance(filenames, types.StringTypes):
-            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
-            if len(filenames) == 0:
-                raise YTNoFilenamesMatchPattern(pattern)
-        obj = cls(filenames[:], parallel = parallel)
+        obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 
     @classmethod
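
The TimeSeriesData changes above thread arbitrary keyword arguments through to load() each time an output in the series is opened; a hedged usage sketch (the glob pattern and the skip_particles keyword are illustrative only, the latter borrowed from the ART hierarchy code elsewhere in this pull request rather than being a documented load() option):

    from yt.mods import *

    # Each output is opened lazily as load(fn, skip_particles=True) during
    # iteration.
    ts = TimeSeriesData.from_filenames("DD????/DD????.hierarchy",
                                       skip_particles=True)
    for pf in ts:
        print pf, pf.current_time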


diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -18,17 +20,16 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -42,64 +43,65 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +111,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -124,381 +127,272 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
-        
+
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't necessarily cover
+                    #the whole level, but together with the other patches it
+                    #covers every region containing octs. The object
+                    #automatically shrinks to barely encompass its octs.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches may be mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% of a patch's volume is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
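The Hilbert-index regridding in _count_grids above boils down to coarsening a per-oct key and then grouping octs that share the coarsened value. The sketch below fakes the keys with plain integers, since the real ones come from _ramses_reader.get_hilbert_indices(); only the shift-and-group step is shown:

    import numpy as np

    LEVEL_OF_EDGE = 7
    base_level = int(np.log2(128))   # log2 of the root grid size, as in _count_grids

    # Invented stand-ins for the space-filling-curve keys of each oct's left corner.
    hilbert_indices = np.array([16843009, 16843010, 16912385,
                                553648128, 553713664], dtype='int64')

    # Right-shifting the keys collapses spatially nearby octs onto one value...
    coarse = hilbert_indices >> (base_level + LEVEL_OF_EDGE)
    unique_indices = np.unique(coarse)

    # ...and each unique coarse key defines one group of octs that gets
    # re-gridded together into a ProtoSubgrid.
    for u in unique_indices:
        print(u, np.where(coarse == u)[0])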
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
+            um  = self.pf.conversion_factors['Mass'] #mass units in g
+            uv  = self.pf.conversion_factors['Velocity'] #velocity units, proper cm/s
             self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_velocity   = self.pf.particle_velocity
             self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_star_index = len(wspecies)-1
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float32')
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                if i == self.pf.particle_star_index:
+                    sa,sb = a,b
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m*um #mass in grams
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        setattr(self.pf,psf,
+                                np.zeros(nparticles,dtype='float32'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
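The stellar-age block in _parse_hierarchy converts birth times from Gyr to seconds and subtracts them from the current time. A minimal numeric sketch with invented values (b2t() and spread_ages() are the frontend helpers referenced above and are not re-implemented here):

    import numpy as np

    birthtimes_gyr = np.array([0.5, 2.0, 10.0], dtype='float64')  # made up
    current_time   = 4.3e17        # ~13.6 Gyr in seconds, also made up

    birthtimes  = birthtimes_gyr * 1.0e9     # Gyr -> yr
    birthtimes *= 365 * 24 * 3600            # yr -> s, the convention used above
    ages = current_time - birthtimes         # seconds since formation
    print(ages / 3.15e16)                    # back to ~Gyr as a sanity check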
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,53 +401,58 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
+                g.particle_index = np.arange(g.NumberOfParticles)
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
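The particle-attachment loop in the new _populate_grid_objects falls back from a per-axis attribute (particle_position_x) to a column of the combined (N,3) array when the former is missing. Below is a simplified sketch of that lookup with a stand-in parameter-file object; the base != name guard is added here for clarity and is not in the commit:

    import numpy as np

    class FakePF:
        # Only the combined (N,3) array exists, as the frontend stores it.
        particle_position = np.random.random((100, 3))

    def resolve_particle_field(pf, name):
        source = getattr(pf, name, None)
        if source is None:
            for i, ax in enumerate('xyz'):
                base = name.replace('_%s' % ax, '')
                combined = getattr(pf, base, None)
                if combined is not None and base != name:
                    return combined[:, i]
        return source

    print(resolve_particle_field(FakePF(), 'particle_position_x').shape)  # (100,)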
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
@@ -580,97 +479,67 @@
     _hierarchy_class = ARTHierarchy
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
-    _handle = None
     
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 use_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
         self.storage_filename = storage_filename
-        
-        
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
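_find_files above derives an "affix" from the AMR filename and globs for the companion files with a '?' wildcard on each side of it. A sketch with a hypothetical filename (the patterns are the ones added to definitions.py later in this commit):

    import os

    filename_pattern = {
        'amr': '10MpcBox_csf512_%s.d',
        'particle_header': 'PMcrd%s.DAT',
        'particle_data': 'PMcrs0%s.DAT',
        'particle_stars': 'stars_%s.dat',
    }
    file_amr = '/data/sim/10MpcBox_csf512_a0.500.d'   # invented path

    prefix, suffix = filename_pattern['amr'].split('%s')
    affix = os.path.basename(file_amr).replace(prefix, '').replace(suffix, '')
    affix = affix.replace('_', '')[1:-1]   # trim one char on each side

    # The '?' wildcards absorb the trimmed characters and any stray underscore.
    for filetype, pattern in filename_pattern.items():
        print(filetype, os.path.dirname(file_amr) + '/' + pattern % ('?%s?' % affix))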
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical units based
+        on the parameters from the header.
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
         aexpn = self["aexpn"]
@@ -679,269 +548,111 @@
             self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        t0 = r0/v0
+        rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+
+        #factors to multiply the native code units to CGS
+        cf = defaultdict(lambda: 1.0)
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
         
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
         for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
+            self.conversion_factors["%s-velocity" % ax] = v0/aexpn
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
 
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
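The spatial-unit block near the top of the new _set_units keeps three related box sizes around: comoving Mpc/h, comoving Mpc, and proper Mpc. A tiny worked example with invented header values:

    boxh, hubble, aexpn = 10.0, 0.70, 0.50      # illustrative only
    z = 1.0 / aexpn - 1.0                       # aexpn -> redshift

    boxcm_cal   = boxh                          # comoving Mpc/h
    boxcm_uncal = boxcm_cal / hubble            # comoving Mpc
    box_proper  = boxcm_uncal / (1 + z)         # proper Mpc
    print(z, boxcm_uncal, box_proper)           # 1.0  ~14.29  ~7.14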
     def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
         header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = np.diff(lspecies)[:n-1]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *ls_nonzero)
+            self.parameters.update(particle_header_vals)
+    
+        #set up the standard simulation parameters yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
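Two small derivations in _parse_parameter_file are easy to check by hand: the root grid dimensions come from the cube root of the stored cell count, and the redshift comes straight from the expansion factor. With invented header values:

    import numpy as np

    ncell, aexpn = 128**3, 0.625                       # made-up header values

    est = int(np.rint(ncell ** (1.0 / 3.0)))           # cells per side of root grid
    domain_dimensions = np.ones(3, dtype='int64') * est
    current_redshift = aexpn**-1.0 - 1.0
    print(domain_dimensions, current_redshift)         # [128 128 128] 0.6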
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         """
-        Defined for Daniel Ceverino's file naming scheme.
+        Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
         fn = ("%s" % (os.path.basename(args[0])))


diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody at ucsc.ed>
+Author: Christopher E. Moody <cemoody at ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,19 +25,128 @@
 
 """
 
-art_particle_field_names = [
-'particle_age',
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type']
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
+]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
+filename_pattern = {
+    'amr': '10MpcBox_csf512_%s.d',
+    'particle_header': 'PMcrd%s.DAT',
+    'particle_data': 'PMcrs0%s.DAT',
+    'particle_stars': 'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137
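
The header layouts above are (format, name) pairs using struct-style big-endian codes, which makes them easy to unpack generically. A minimal sketch of how such a list could be read into a parameter dict (the helper name read_header_struct is hypothetical, not part of this commit):

import struct

def read_header_struct(fh, header_struct):
    # Sketch only: walk a (format, name) list such as amr_header_struct and
    # unpack each field from an open binary file handle.
    params = {}
    for fmt, name in header_struct:
        size = struct.calcsize(fmt)
        value, = struct.unpack(fmt, fh.read(size))
        if not name.startswith('pad'):  # skip Fortran record padding
            params[name] = value
    return params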


diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
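
All of the velocity, metallicity, and particle fields above are registered through yt's add_field, so they behave like native fields once an ART dataset is loaded. A brief usage sketch in the yt 2.x style (the snapshot filename is hypothetical, following filename_pattern in definitions.py):

from yt.mods import *

pf = load("10MpcBox_csf512_a0.300.d")   # hypothetical ART snapshot name
dd = pf.h.all_data()
print dd["Temperature"].min(), dd["x-velocity"].max()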


diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bd0ac35d628e271f53e041fa253d79af12b3ae0e yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -121,45 +121,19 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -198,9 +172,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +206,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +257,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +278,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +292,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +305,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -346,7 +325,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk
@@ -476,3 +455,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd
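
spread_ages expects its input in non-increasing order (the assert on np.diff enforces this) and linearly spreads each clump of identical formation times back over the spread window, 10 Myr by default. A small synthetic usage sketch (made-up values, not real particle data):

import numpy as np

# Three stars born in one timestep, two in another, one more recent (ages in seconds).
ages = np.array([5.0e16, 5.0e16, 5.0e16, 3.0e16, 3.0e16, 1.0e16])
ages_spread = spread_ages(ages)   # uses the default spread of 1e7 yr in seconds
assert ages_spread.shape == ages.shape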

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


