[yt-svn] commit/yt-3.0: 188 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Oct 3 22:25:54 PDT 2012


188 new commits in yt-3.0:


https://bitbucket.org/yt_analysis/yt-3.0/changeset/6a46bd81eb7b/
changeset:   6a46bd81eb7b
branch:      yt
user:        MatthewTurk
date:        2012-06-09 21:59:12
summary:     Adding an IO stager
affected #:  1 file

diff -r 08f29a9cec42435bb0581a2059b4f1c7abd6c8c5 -r 6a46bd81eb7bb4c77eeadb045071dc7a2800bcc8 yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+from .parallel_analysis_interface import ProcessorPool
+from yt.utilities.io_handler import BaseIOHandler
+from contextlib import contextmanager
+import time
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return na.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except:# self.pf.hierarchy.io._read_exception as exc:
+            if fi.not_in_all:
+                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while 1:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = na.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = na.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
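
A note on the remote_io() context manager above: as written, the swap-and-restore of pf.h.io is not wrapped in try/finally, so an exception inside func() would leave the remote handler installed. A minimal sketch of the exception-safe form of the same idiom (generic names, not yt API):

    from contextlib import contextmanager

    @contextmanager
    def swapped_attribute(obj, name, replacement):
        # Temporarily replace obj.<name>; always restore the original,
        # even if the body of the with-block raises.
        original = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield replacement
        finally:
            setattr(obj, name, original)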
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7216eb530568/
changeset:   7216eb530568
branch:      yt
user:        MatthewTurk
date:        2012-06-09 23:20:54
summary:     Fixing indentation issue.
affected #:  1 file

diff -r 6a46bd81eb7bb4c77eeadb045071dc7a2800bcc8 -r 7216eb530568a5f3077423b91e69dd845fb5518c yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -269,7 +269,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -292,11 +292,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = string(len(workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
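
Beyond the tab/space fix, the surviving line `name = string(len(workgroups))` would raise NameError at runtime: Python has no builtin `string`, and `workgroups` is unbound in this scope (presumably the pool's own attribute is meant). The intended default is presumably along the lines of:

    if name is None:
        name = str(len(self.workgroups))  # default name: the workgroup's index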



https://bitbucket.org/yt_analysis/yt-3.0/changeset/27ed700ec6d7/
changeset:   27ed700ec6d7
branch:      yt
user:        MatthewTurk
date:        2012-06-10 00:39:41
summary:     Controller system skeleton
affected #:  2 files

diff -r 7216eb530568a5f3077423b91e69dd845fb5518c -r 27ed700ec6d7e6c4b35541224d82af412c840239 yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,66 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextmanager import contextlib
+
+
+class WorkSplitter(object):
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    def run_controller(self):
+        raise NotImplementedError
+
+    def run_group1(self):
+        raise NotImplementedError
+
+    def run_group2(self):
+        raise NotImplementedError


diff -r 7216eb530568a5f3077423b91e69dd845fb5518c -r 27ed700ec6d7e6c4b35541224d82af412c840239 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -2,10 +2,10 @@
 A simple IO staging mechanism
 
 Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
+Affiliation: Columbia
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3e4e9d309c96/
changeset:   3e4e9d309c96
branch:      yt
user:        MatthewTurk
date:        2012-06-12 19:15:36
summary:     Controller system
affected #:  1 file

diff -r 27ed700ec6d7e6c4b35541224d82af412c840239 -r 3e4e9d309c96b5757ccb414c9f1da44976ea4096 yt/utilities/parallel_tools/controller_system.py
--- a/yt/utilities/parallel_tools/controller_system.py
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -28,7 +28,7 @@
 except ImportError:
     pass
 from contextmanager import contextlib
-
+from abc import ABCMeta, abstractmethod, abstractproperty
 
 class WorkSplitter(object):
     def __init__(self, controller, group1, group2):
@@ -56,11 +56,14 @@
         else:
             raise NotImplementedError
 
+    @abstractmethod
     def run_controller(self):
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def run_group1(self):
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def run_group2(self):
-        raise NotImplementedError
+        pass
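
Two small caveats on this changeset. The context line `from contextmanager import contextlib` has module and symbol reversed (it should read `from contextlib import contextmanager`), and `@abstractmethod` is only enforced when the class declares ABCMeta as its metaclass, which WorkSplitter does not. A minimal sketch of the enforced pattern under Python 2, which this codebase targets (class name here is illustrative):

    from abc import ABCMeta, abstractmethod

    class WorkSplitterBase(object):
        __metaclass__ = ABCMeta  # without this, @abstractmethod is inert

        @abstractmethod
        def run_controller(self):
            pass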



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4516f7cf678c/
changeset:   4516f7cf678c
branch:      yt
user:        MatthewTurk
date:        2012-06-28 18:42:52
summary:     Fixing another missing pending request update location
affected #:  1 file

diff -r 437a66fd8588e4ab5735718bfa43b41f19b8fdf3 -r 4516f7cf678c91a89a0d5d74b4263bccedda8d0e yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);



https://bitbucket.org/yt_analysis/yt-3.0/changeset/83d3ca350ab3/
changeset:   83d3ca350ab3
branch:      yt
user:        jzuhone
date:        2012-08-08 19:42:49
summary:     First pass at implementing ParticleIO for FLASH
affected #:  1 file

diff -r a0d43ccae65d9bfe84532a9fb563ae0ab341c8c8 -r 83d3ca350ab3def502b3aea3526f3e8b14faaf1d yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -29,6 +29,77 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 
+def particles_validator_region(x, y, z, args) :
+    
+    left_edge = args[0]
+    right_edge = args[1]
+    periodic = args[2]
+    DLE = args[3]
+    DRE = args[4]
+
+    xx = x
+    yy = y
+    zz = z
+
+    if periodic == 1 : 
+
+        DW = DRE - DLE
+        xx[x < left_edge[0]] = x + DW[0]
+        xx[x > right_edge[0]] = x - DW[0]
+        yy[y < left_edge[1]] = y + DW[1]
+        yy[y > right_edge[1]] = y - DW[1]
+        zz[z < left_edge[2]] = z + DW[2]
+        zz[z > right_edge[2]] = z - DW[2]
+
+    idxx = na.logical_and(xx >= left_edge[0], xx <= right_edge[0])
+    idxy = na.logical_and(yy >= left_edge[1], yy <= right_edge[1])
+    idxz = na.logical_and(zz >= left_edge[2], zz <= right_edge[2])
+
+    idxs = na.logical_and(idxx, idyy)
+    idxs = na.logical_and(idxz, idxs)
+
+    return idxs
+
+def particles_validator_sphere(x, y, z, args) :
+    
+    center = args[0]
+    radius = args[1]
+    periodic = args[2]
+    DLE = args[3]
+    DRE = args[4]
+
+    xx = na.abs(x-center[0])
+    yy = na.abs(y-center[1])
+    zz = na.abs(z-center[2])
+
+    if periodic == 1 : 
+
+        DW = DRE - DLE
+
+        xx = na.minimum(xx,DW[0]-xx)
+        yy = na.minimum(yy,DW[1]-yy)
+        zz = na.minimum(zz,DW[2]-zz)
+
+    r = na.sqrt(xx*xx+yy*yy+zz*zz)
+
+    return r <= radius
+
+def particles_validator_disk(x, y, z, args) :
+    
+    center = args[0]
+    normal = args[1]
+    radius = args[2]
+    height = args[3]
+
+    d = -na.dot(normal*center)
+
+    ph = na.abs(x*normal[0] + y*normal[1] + z*normal[2] + d)
+    pd2 = (x-center[0])**2+(y-center[1])**2+(z-center[2])**2
+
+    pr = na.sqrt(pd2-ph*ph)
+
+    return na.logical_and(pr <= radius, ph <= height)
+
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = False
     _data_style = "flash_hdf5"
@@ -49,8 +120,26 @@
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
-        pass
+        f = self._handle
+        _particles = []
+        fx = self._particle_fields["particle_posx"]
+        fy = self._particle_fields["particle_posy"]
+        fz = self._particle_fields["particle_posz"]
+        posx = f["/tracer particles"][:,fx]
+        posy = f["/tracer particles"][:,fy]
+        posz = f["/tracer particles"][:,fz]
+        if type == 0 :
+            idxs = particles_validator_region(posx,posy,posz,args)
+        elif type == 1 :
+            idxs = particles_validator_sphere(posx,posy,posz,args)
+        elif type == 2 :
+            idxs = particles_validator_disk(posx,posy,posz,args)
+        for field in fields_to_read :
+            fi = self._particle_fields[field]
+            _particles.append(f["/tracer particles"][idxs,fi])
+        return _particles
 
+    """
     def _select_particles(self, grid, field):
         f = self._handle
         npart = f["/tracer particles"].shape[0]
@@ -67,6 +156,7 @@
             tr.append(f["/tracer particles"][gi,fi])
             start = end
         return na.concatenate(tr)
+    """
 
     def _read_data_set(self, grid, field):
         f = self._handle
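
The `idyy` typo in particles_validator_region is corrected in the next changeset, but two other issues in this first pass are worth noting: the periodic-wrap lines assign the full-length array `x + DW[0]` to a masked subset of `xx` (which fails unless every element matches the mask), and `na.dot(normal*center)` in the disk validator passes one argument where `na.dot(normal, center)` is meant. A minimal NumPy sketch of the intended region test, assuming left_edge, right_edge, DLE, and DRE are length-3 arrays:

    import numpy as np

    def region_mask(x, y, z, left_edge, right_edge, periodic, DLE, DRE):
        DW = DRE - DLE
        mask = np.ones(x.shape[0], dtype=bool)
        for ax, p in enumerate((x.copy(), y.copy(), z.copy())):
            if periodic:
                p[p < left_edge[ax]] += DW[ax]   # wrap coordinates from below
                p[p > right_edge[ax]] -= DW[ax]  # wrap coordinates from above
            mask &= (p >= left_edge[ax]) & (p <= right_edge[ax])
        return mask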



https://bitbucket.org/yt_analysis/yt-3.0/changeset/84700323b1c2/
changeset:   84700323b1c2
branch:      yt
user:        jzuhone
date:        2012-08-08 20:19:00
summary:     ParticleIO now runs correctly, but it's still kind of slow. Probably will have to rewrite this in Cython.
affected #:  1 file

diff -r 83d3ca350ab3def502b3aea3526f3e8b14faaf1d -r 84700323b1c2a0488488ca0b3851414ad2686e16 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -30,7 +30,7 @@
     BaseIOHandler
 
 def particles_validator_region(x, y, z, args) :
-    
+
     left_edge = args[0]
     right_edge = args[1]
     periodic = args[2]
@@ -55,7 +55,7 @@
     idxy = na.logical_and(yy >= left_edge[1], yy <= right_edge[1])
     idxz = na.logical_and(zz >= left_edge[2], zz <= right_edge[2])
 
-    idxs = na.logical_and(idxx, idyy)
+    idxs = na.logical_and(idxx, idxy)
     idxs = na.logical_and(idxz, idxs)
 
     return idxs
@@ -101,7 +101,7 @@
     return na.logical_and(pr <= radius, ph <= height)
 
 class IOHandlerFLASH(BaseIOHandler):
-    _particle_reader = False
+    _particle_reader = True
     _data_style = "flash_hdf5"
 
     def __init__(self, pf, *args, **kwargs):
@@ -121,13 +121,14 @@
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
         f = self._handle
-        _particles = []
+        particles = []
+        _particles = f["/tracer particles"][:,:]
         fx = self._particle_fields["particle_posx"]
         fy = self._particle_fields["particle_posy"]
         fz = self._particle_fields["particle_posz"]
-        posx = f["/tracer particles"][:,fx]
-        posy = f["/tracer particles"][:,fy]
-        posz = f["/tracer particles"][:,fz]
+        posx = _particles[:,fx]
+        posy = _particles[:,fy]
+        posz = _particles[:,fz]
         if type == 0 :
             idxs = particles_validator_region(posx,posy,posz,args)
         elif type == 1 :
@@ -136,8 +137,9 @@
             idxs = particles_validator_disk(posx,posy,posz,args)
         for field in fields_to_read :
             fi = self._particle_fields[field]
-            _particles.append(f["/tracer particles"][idxs,fi])
-        return _particles
+            particles.append(_particles[idxs,fi])
+        del _particles
+        return particles
 
     """
     def _select_particles(self, grid, field):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b086152fcd63/
changeset:   b086152fcd63
branch:      yt
user:        jzuhone
date:        2012-08-08 20:33:04
summary:     Removing _select_particles as it is deprecated (and very wrong, actually).
affected #:  1 file

diff -r 84700323b1c2a0488488ca0b3851414ad2686e16 -r b086152fcd63edc6ef507935adff0482482422e2 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -141,25 +141,6 @@
         del _particles
         return particles
 
-    """
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return na.concatenate(tr)
-    """
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3cb50470c165/
changeset:   3cb50470c165
branch:      yt
user:        jzuhone
date:        2012-08-08 23:31:36
summary:     Cythonized the bulk of _read_particles. Still too slow...
affected #:  2 files

diff -r b086152fcd63edc6ef507935adff0482482422e2 -r 3cb50470c16598aeb048b216d6ae734aedb6ee0f yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -25,6 +25,7 @@
 
 import numpy as na
 import h5py
+from yt.frontends.flash import _flash_particle_reader
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -120,6 +121,7 @@
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
+        """
         f = self._handle
         particles = []
         _particles = f["/tracer particles"][:,:]
@@ -139,7 +141,18 @@
             fi = self._particle_fields[field]
             particles.append(_particles[idxs,fi])
         del _particles
-        return particles
+        """
+        fx = self._particle_fields["particle_posx"]
+        fy = self._particle_fields["particle_posy"]
+        fz = self._particle_fields["particle_posz"]
+        field_indices = na.array([self._particle_fields[field]
+                                  for field in fields_to_read],
+                                 dtype='int32')
+        return _flash_particle_reader.read_particles(self._handle.fid,
+                                                     fx, fy, fz,
+                                                     len(fields_to_read),
+                                                     type, args,
+                                                     field_indices)
 
     def _read_data_set(self, grid, field):
         f = self._handle


diff -r b086152fcd63edc6ef507935adff0482482422e2 -r 3cb50470c16598aeb048b216d6ae734aedb6ee0f yt/frontends/flash/setup.py
--- a/yt/frontends/flash/setup.py
+++ b/yt/frontends/flash/setup.py
@@ -8,6 +8,10 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('flash', parent_package, top_path)
+    config.add_extension("_flash_particle_reader",
+                         ["yt/frontends/flash/_flash_particle_reader.pyx"],
+                         language="c"
+                         )
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config
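
config.add_extension() is handed the .pyx source directly here, so this relies on yt's build machinery having Cython available to translate it to C. A standalone equivalent for building the same extension, sketched with Cython's distutils support (the .pyx file itself lands two changesets below and cimports numpy, hence the numpy include path):

    from distutils.core import setup
    from distutils.extension import Extension
    from Cython.Distutils import build_ext
    import numpy as np

    ext = Extension("_flash_particle_reader",
                    ["_flash_particle_reader.pyx"],
                    include_dirs=[np.get_include()])
    setup(cmdclass={"build_ext": build_ext}, ext_modules=[ext])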



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9fe563700715/
changeset:   9fe563700715
branch:      yt
user:        jzuhone
date:        2012-08-08 23:33:14
summary:     Removing old Python code we moved to Cython
affected #:  1 file

diff -r 3cb50470c16598aeb048b216d6ae734aedb6ee0f -r 9fe5637007154ad1fecdac000c83904cfc16100f yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -30,77 +30,6 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 
-def particles_validator_region(x, y, z, args) :
-
-    left_edge = args[0]
-    right_edge = args[1]
-    periodic = args[2]
-    DLE = args[3]
-    DRE = args[4]
-
-    xx = x
-    yy = y
-    zz = z
-
-    if periodic == 1 : 
-
-        DW = DRE - DLE
-        xx[x < left_edge[0]] = x + DW[0]
-        xx[x > right_edge[0]] = x - DW[0]
-        yy[y < left_edge[1]] = y + DW[1]
-        yy[y > right_edge[1]] = y - DW[1]
-        zz[z < left_edge[2]] = z + DW[2]
-        zz[z > right_edge[2]] = z - DW[2]
-
-    idxx = na.logical_and(xx >= left_edge[0], xx <= right_edge[0])
-    idxy = na.logical_and(yy >= left_edge[1], yy <= right_edge[1])
-    idxz = na.logical_and(zz >= left_edge[2], zz <= right_edge[2])
-
-    idxs = na.logical_and(idxx, idxy)
-    idxs = na.logical_and(idxz, idxs)
-
-    return idxs
-
-def particles_validator_sphere(x, y, z, args) :
-    
-    center = args[0]
-    radius = args[1]
-    periodic = args[2]
-    DLE = args[3]
-    DRE = args[4]
-
-    xx = na.abs(x-center[0])
-    yy = na.abs(y-center[1])
-    zz = na.abs(z-center[2])
-
-    if periodic == 1 : 
-
-        DW = DRE - DLE
-
-        xx = na.minimum(xx,DW[0]-xx)
-        yy = na.minimum(yy,DW[1]-yy)
-        zz = na.minimum(zz,DW[2]-zz)
-
-    r = na.sqrt(xx*xx+yy*yy+zz*zz)
-
-    return r <= radius
-
-def particles_validator_disk(x, y, z, args) :
-    
-    center = args[0]
-    normal = args[1]
-    radius = args[2]
-    height = args[3]
-
-    d = -na.dot(normal*center)
-
-    ph = na.abs(x*normal[0] + y*normal[1] + z*normal[2] + d)
-    pd2 = (x-center[0])**2+(y-center[1])**2+(z-center[2])**2
-
-    pr = na.sqrt(pd2-ph*ph)
-
-    return na.logical_and(pr <= radius, ph <= height)
-
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = True
     _data_style = "flash_hdf5"
@@ -121,27 +50,6 @@
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
-        """
-        f = self._handle
-        particles = []
-        _particles = f["/tracer particles"][:,:]
-        fx = self._particle_fields["particle_posx"]
-        fy = self._particle_fields["particle_posy"]
-        fz = self._particle_fields["particle_posz"]
-        posx = _particles[:,fx]
-        posy = _particles[:,fy]
-        posz = _particles[:,fz]
-        if type == 0 :
-            idxs = particles_validator_region(posx,posy,posz,args)
-        elif type == 1 :
-            idxs = particles_validator_sphere(posx,posy,posz,args)
-        elif type == 2 :
-            idxs = particles_validator_disk(posx,posy,posz,args)
-        for field in fields_to_read :
-            fi = self._particle_fields[field]
-            particles.append(_particles[idxs,fi])
-        del _particles
-        """
         fx = self._particle_fields["particle_posx"]
         fy = self._particle_fields["particle_posy"]
         fz = self._particle_fields["particle_posz"]



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2a4951ccdade/
changeset:   2a4951ccdade
branch:      yt
user:        jzuhone
date:        2012-08-08 23:37:46
summary:     Forgot to add this in
affected #:  2 files

diff -r 9fe5637007154ad1fecdac000c83904cfc16100f -r 2a4951ccdade83639f4179c09016cf3955f51862 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -23,6 +23,7 @@
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import TimeSeriesData
 from yt.utilities.lib import sample_field_at_positions
+from yt.convenience import load
 from yt.funcs import *
 
 import numpy as na
@@ -79,7 +80,8 @@
         indices.sort() # Just in case the caller wasn't careful
         
         self.field_data = YTFieldData()
-        self.pfs = TimeSeriesData.from_filenames(filenames)
+        #self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.pfs = [load(fn) for fn in filenames]
         self.masks = []
         self.sorts = []
         self.indices = indices
@@ -112,14 +114,14 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
-                print "Not all requested particle ids contained in this file!"
-                raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
-            self.masks.append(mask)            
-            self.sorts.append(sorts)
-            self.times.append(pf.current_time)
+            #if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            #    print "Not all requested particle ids contained in this file!"
+            #    raise IndexError
+            #mask = na.in1d(newtags, indices, assume_unique=True)
+            #sorts = na.argsort(newtags[mask])
+            #self.masks.append(mask)            
+            #self.sorts.append(sorts)
+            #self.times.append(pf.current_time)
 
         self.times = na.array(self.times)
 


diff -r 9fe5637007154ad1fecdac000c83904cfc16100f -r 2a4951ccdade83639f4179c09016cf3955f51862 yt/frontends/flash/_flash_particle_reader.pyx
--- /dev/null
+++ b/yt/frontends/flash/_flash_particle_reader.pyx
@@ -0,0 +1,232 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+import h5py
+
+cdef particles_validator_region(np.ndarray[np.float64_t, ndim=1] x,
+                                np.ndarray[np.float64_t, ndim=1] y,
+                                np.ndarray[np.float64_t, ndim=1] z,
+                                np.ndarray[np.float64_t, ndim=1] left_edge,
+                                np.ndarray[np.float64_t, ndim=1] right_edge,
+                                np.int32_t periodic,
+                                np.ndarray[np.float64_t, ndim=1] DLE,
+                                np.ndarray[np.float64_t, ndim=1] DRE) :
+
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxx
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxy
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxz
+    
+    cdef np.ndarray[np.float64_t, ndim=1] xx
+    cdef np.ndarray[np.float64_t, ndim=1] yy
+    cdef np.ndarray[np.float64_t, ndim=1] zz
+
+    cdef np.ndarray[np.float64_t, ndim=1] DW
+
+    idxs = np.zeros(x.shape[0], 'bool')
+    idxx = np.zeros(x.shape[0], 'bool')
+    idxy = np.zeros(x.shape[0], 'bool')
+    idxz = np.zeros(x.shape[0], 'bool')
+
+    xx = np.zeros(x.shape[0], 'float64')
+    yy = np.zeros(x.shape[0], 'float64')
+    zz = np.zeros(x.shape[0], 'float64')
+
+    DW = np.zeros(3, 'float64')
+
+    xx = x
+    yy = y
+    zz = z
+
+    if periodic == 1 : 
+
+        DW = DRE - DLE
+        xx[x < left_edge[0]] = x + DW[0]
+        xx[x > right_edge[0]] = x - DW[0]
+        yy[y < left_edge[1]] = y + DW[1]
+        yy[y > right_edge[1]] = y - DW[1]
+        zz[z < left_edge[2]] = z + DW[2]
+        zz[z > right_edge[2]] = z - DW[2]
+
+    idxx = np.logical_and(xx >= left_edge[0], xx <= right_edge[0])
+    idxy = np.logical_and(yy >= left_edge[1], yy <= right_edge[1])
+    idxz = np.logical_and(zz >= left_edge[2], zz <= right_edge[2])
+
+    idxs = np.logical_and(idxx, idxy)
+    idxs = np.logical_and(idxz, idxs)
+
+    return idxs
+
+cdef particles_validator_sphere(np.ndarray[np.float64_t, ndim=1] x,
+                                np.ndarray[np.float64_t, ndim=1] y, 
+                                np.ndarray[np.float64_t, ndim=1] z,
+                                np.ndarray[np.float64_t, ndim=1] center,
+                                np.float64_t radius,
+                                np.int32_t periodic,
+                                np.ndarray[np.float64_t, ndim=1] DLE,
+                                np.ndarray[np.float64_t, ndim=1] DRE) :
+
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
+
+    cdef np.ndarray[np.float64_t, ndim=1] r
+    cdef np.ndarray[np.float64_t, ndim=1] xx
+    cdef np.ndarray[np.float64_t, ndim=1] yy
+    cdef np.ndarray[np.float64_t, ndim=1] zz
+
+    cdef np.ndarray[np.float64_t, ndim=1] DW
+
+    idxs = np.zeros(x.shape[0], 'bool')
+    
+    r = np.zeros(x.shape[0], 'float64')
+    xx = np.zeros(x.shape[0], 'float64')
+    yy = np.zeros(x.shape[0], 'float64')
+    zz = np.zeros(x.shape[0], 'float64')
+
+    DW = np.zeros(3, 'float64')
+    
+    xx = np.abs(x-center[0])
+    yy = np.abs(y-center[1])
+    zz = np.abs(z-center[2])
+
+    if periodic == 1 : 
+
+        DW = DRE - DLE
+
+        xx = np.minimum(xx,DW[0]-xx)
+        yy = np.minimum(yy,DW[1]-yy)
+        zz = np.minimum(zz,DW[2]-zz)
+
+    r = np.sqrt(xx*xx+yy*yy+zz*zz)
+
+    idxs = np.array(r <= radius)
+    
+    return idxs
+
+cdef particles_validator_disk(np.ndarray[np.float64_t, ndim=1] x,
+                              np.ndarray[np.float64_t, ndim=1] y,
+                              np.ndarray[np.float64_t, ndim=1] z,
+                              np.ndarray[np.float64_t, ndim=1] center,
+                              np.ndarray[np.float64_t, ndim=1] normal,
+                              np.float64_t radius, np.float64_t height) :
+
+    cdef np.float64_t d
+
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
+
+    cdef np.ndarray[np.float64_t, ndim=1] ph
+    cdef np.ndarray[np.float64_t, ndim=1] pd2
+    cdef np.ndarray[np.float64_t, ndim=1] pr
+
+    idxs = np.zeros(x.shape[0], 'bool')
+    
+    ph = np.zeros(x.shape[0], 'float64')
+    pd2 = np.zeros(x.shape[0], 'float64')
+    pr = np.zeros(x.shape[0], 'float64')
+    
+    d = -np.dot(normal*center)
+
+    ph = np.abs(x*normal[0] + y*normal[1] + z*normal[2] + d)
+    pd2 = (x-center[0])**2+(y-center[1])**2+(z-center[2])**2
+
+    pr = np.sqrt(pd2-ph*ph)
+
+    idxs = np.logical_and(pr <= radius, ph <= height)
+    
+    return idxs
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def read_particles(file_id, int x_index, int y_index, int z_index,
+                   int num_fields, int rtype, args,
+                   np.ndarray[np.int32_t, ndim=1] field_indices) :
+
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
+    cdef int i
+    cdef int num_particles
+    cdef np.int32_t periodic
+    cdef np.ndarray[np.float64_t, ndim=1] left_edge
+    cdef np.ndarray[np.float64_t, ndim=1] right_edge
+    cdef np.ndarray[np.float64_t, ndim=1] DLE
+    cdef np.ndarray[np.float64_t, ndim=1] DRE
+    cdef np.float64_t radius
+    cdef np.float64_t height
+    cdef np.ndarray[np.float64_t, ndim=1] normal
+    cdef np.ndarray[np.float64_t, ndim=1] center
+    cdef np.ndarray[np.float64_t, ndim=1] particle_field
+    cdef np.ndarray[np.float64_t, ndim=1] posx
+    cdef np.ndarray[np.float64_t, ndim=1] posy
+    cdef np.ndarray[np.float64_t, ndim=1] posz
+
+    left_edge = np.zeros(3, 'float64')
+    right_edge = np.zeros(3, 'float64')
+    DLE = np.zeros(3, 'float64')
+    DRE = np.zeros(3, 'float64')
+    normal = np.zeros(3, 'float64')
+    center = np.zeros(3, 'float64')
+
+    dataset = h5py.h5d.open(file_id, "tracer particles")
+    dataspace = dataset.get_space()
+    rank = dataspace.get_simple_extent_dims()
+    memspace = h5py.h5s.create_simple((rank[0],))
+
+    num_particles = rank[0]
+    count = (num_particles,1)
+
+    posx = np.zeros(num_particles, 'float64')
+    posy = np.zeros(num_particles, 'float64')
+    posz = np.zeros(num_particles, 'float64')
+
+    start = (0,x_index)
+    dataspace.select_hyperslab(start,count)
+    dataset.read(memspace, dataspace, posx)
+
+    start = (0,y_index)
+    dataspace.select_hyperslab(start,count)
+    dataset.read(memspace, dataspace, posy)
+
+    start = (0,z_index)
+    dataspace.select_hyperslab(start,count)
+    dataset.read(memspace, dataspace, posz)
+    
+    idxs = np.zeros(num_particles, 'bool')
+
+    particle_field = np.zeros(num_particles, 'float64')
+    
+    if rtype == 0 :
+        left_edge = args[0]
+        right_edge = args[1]
+        periodic = args[2]
+        DLE = args[3]
+        DRE = args[4]
+        idxs = particles_validator_region(posx,posy,posz,
+                                          left_edge,right_edge,
+                                          periodic,DLE,DRE)
+    elif rtype == 1:
+        center = args[0]
+        radius = args[1]
+        periodic = args[2]
+        DLE = args[3]
+        DRE = args[4]
+        idxs = particles_validator_sphere(posx,posy,posz,
+                                          center,radius,
+                                          periodic,DLE,DRE)
+    elif rtype == 2:
+        center = args[0]
+        normal = args[1]
+        radius = args[2]
+        height = args[3]
+        idxs = particles_validator_disk(posx,posy,posz,
+                                        center,normal,
+                                        radius,height)
+
+    _particles = []
+
+    for i in range(num_fields) :
+
+        start = (0,field_indices[i])
+        dataspace.select_hyperslab(start,count)
+        dataset.read(memspace, dataspace, particle_field)
+        _particles.append(particle_field[idxs])
+        
+    return _particles
+    



https://bitbucket.org/yt_analysis/yt-3.0/changeset/479bd429db27/
changeset:   479bd429db27
branch:      yt
user:        MatthewTurk
date:        2012-08-09 00:01:05
summary:     Unrolling a bunch of loops in Cython for the FLASH particle reader for regions
affected #:  1 file

diff -r 2a4951ccdade83639f4179c09016cf3955f51862 -r 479bd429db270b6ca60e2b7be32e763593d07a28 yt/frontends/flash/_flash_particle_reader.pyx
--- a/yt/frontends/flash/_flash_particle_reader.pyx
+++ b/yt/frontends/flash/_flash_particle_reader.pyx
@@ -12,50 +12,34 @@
                                 np.ndarray[np.float64_t, ndim=1] DLE,
                                 np.ndarray[np.float64_t, ndim=1] DRE) :
 
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxx
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxy
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxz
-    
-    cdef np.ndarray[np.float64_t, ndim=1] xx
-    cdef np.ndarray[np.float64_t, ndim=1] yy
-    cdef np.ndarray[np.float64_t, ndim=1] zz
+    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] mask
+    cdef int i, ax
 
-    cdef np.ndarray[np.float64_t, ndim=1] DW
-
-    idxs = np.zeros(x.shape[0], 'bool')
-    idxx = np.zeros(x.shape[0], 'bool')
-    idxy = np.zeros(x.shape[0], 'bool')
-    idxz = np.zeros(x.shape[0], 'bool')
-
-    xx = np.zeros(x.shape[0], 'float64')
-    yy = np.zeros(x.shape[0], 'float64')
-    zz = np.zeros(x.shape[0], 'float64')
+    mask = np.zeros(x.shape[0], 'bool')
 
     DW = np.zeros(3, 'float64')
 
-    xx = x
-    yy = y
-    zz = z
+    if periodic == 1: 
+        DW = DRE - DLE
 
-    if periodic == 1 : 
+    cdef np.float64_t pos[3]
+    cdef int inside
+    for i in range(x.shape[0]):
+        pos[0] = x[i]
+        pos[1] = y[i]
+        pos[2] = z[i]
+        inside = 1
+        for ax in range(3):
+            if pos[ax] < left_edge[ax]: pos[ax] += DW[ax]
+            if pos[ax] > right_edge[ax]: pos[ax] -= DW[ax]
+        for ax in range(3):
+            if pos[ax] < left_edge[ax] or pos[ax] > right_edge[ax]:
+                inside = 0
+                break
+        if inside == 1:
+            mask[i] = 1
 
-        DW = DRE - DLE
-        xx[x < left_edge[0]] = x + DW[0]
-        xx[x > right_edge[0]] = x - DW[0]
-        yy[y < left_edge[1]] = y + DW[1]
-        yy[y > right_edge[1]] = y - DW[1]
-        zz[z < left_edge[2]] = z + DW[2]
-        zz[z > right_edge[2]] = z - DW[2]
-
-    idxx = np.logical_and(xx >= left_edge[0], xx <= right_edge[0])
-    idxy = np.logical_and(yy >= left_edge[1], yy <= right_edge[1])
-    idxz = np.logical_and(zz >= left_edge[2], zz <= right_edge[2])
-
-    idxs = np.logical_and(idxx, idxy)
-    idxs = np.logical_and(idxz, idxs)
-
-    return idxs
+    return mask
 
 cdef particles_validator_sphere(np.ndarray[np.float64_t, ndim=1] x,
                                 np.ndarray[np.float64_t, ndim=1] y, 
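
The win here comes from replacing six whole-array temporaries and fancy-indexed writes with a single typed loop, which Cython compiles down to plain C (the next changeset adds the boundscheck/wraparound decorators that finish the job). A minimal sketch of the same pattern in an unrelated .pyx module, assuming compilation with Cython:

    cimport cython
    cimport numpy as np
    import numpy as np

    @cython.boundscheck(False)
    @cython.wraparound(False)
    def count_in_range(np.ndarray[np.float64_t, ndim=1] x,
                       double lo, double hi):
        cdef int i, n = 0
        for i in range(x.shape[0]):   # one pass, no intermediate arrays
            if lo <= x[i] <= hi:
                n += 1
        return n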



https://bitbucket.org/yt_analysis/yt-3.0/changeset/298587cec10f/
changeset:   298587cec10f
branch:      yt
user:        MatthewTurk
date:        2012-08-09 00:04:14
summary:     Another minor speedup.
affected #:  1 file

diff -r 479bd429db270b6ca60e2b7be32e763593d07a28 -r 298587cec10f5fae81862ab241b7ea4944ba9841 yt/frontends/flash/_flash_particle_reader.pyx
--- a/yt/frontends/flash/_flash_particle_reader.pyx
+++ b/yt/frontends/flash/_flash_particle_reader.pyx
@@ -3,6 +3,9 @@
 cimport cython
 import h5py
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef particles_validator_region(np.ndarray[np.float64_t, ndim=1] x,
                                 np.ndarray[np.float64_t, ndim=1] y,
                                 np.ndarray[np.float64_t, ndim=1] z,
@@ -17,10 +20,10 @@
 
     mask = np.zeros(x.shape[0], 'bool')
 
-    DW = np.zeros(3, 'float64')
+    cdef np.ndarray[np.float64_t, ndim=1] DW = np.zeros(3, 'float64')
 
     if periodic == 1: 
-        DW = DRE - DLE
+        DW[:] = DRE - DLE
 
     cdef np.float64_t pos[3]
     cdef int inside



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d372363d6efb/
changeset:   d372363d6efb
branch:      yt
user:        samskillman
date:        2012-08-21 22:46:22
summary:     Re-enabling the checks that force enzo grid left/right edges to be on a cell boundary of the parent grid.
affected #:  1 file

diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r d372363d6efb1eefdedc87e1f7e0612e6af21cc4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -445,7 +445,7 @@
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0cfe9c9412ee/
changeset:   0cfe9c9412ee
branch:      yt
user:        samskillman
date:        2012-08-21 23:14:59
summary:     Putting reconstruction of grid left/right edges under the conditional of the ytcfg reconstruct_hierarchy. Defaults to 0, False.
affected #:  2 files

diff -r d372363d6efb1eefdedc87e1f7e0612e6af21cc4 -r 0cfe9c9412eef02f28c6be60ad3aca2d089f8c31 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -51,6 +51,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = '0',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',


diff -r d372363d6efb1eefdedc87e1f7e0612e6af21cc4 -r 0cfe9c9412eef02f28c6be60ad3aca2d089f8c31 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -445,7 +445,8 @@
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            if g.Parent is not None: g._guess_properties_from_parent()
+            if ytcfg.getboolean("yt","reconstruct_hierarchy"):
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/348acc39f206/
changeset:   348acc39f206
branch:      yt
user:        samskillman
date:        2012-08-21 23:25:21
summary:     0->False, and moving check outside loop.
affected #:  2 files

diff -r 0cfe9c9412eef02f28c6be60ad3aca2d089f8c31 -r 348acc39f2061c87dc27d47ee24a8a053836ae34 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -51,7 +51,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
-    reconstruct_hierarchy = '0',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',


diff -r 0cfe9c9412eef02f28c6be60ad3aca2d089f8c31 -r 348acc39f2061c87dc27d47ee24a8a053836ae34 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -441,11 +441,12 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            if ytcfg.getboolean("yt","reconstruct_hierarchy"):
+            if reconstruct:
                 if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/12729ee9bbd5/
changeset:   12729ee9bbd5
branch:      yt
user:        MatthewTurk
date:        2012-08-21 23:28:47
summary:     Merged in samskillman/yt (pull request #249)
affected #:  2 files

diff -r 7c5ad85490e8ade384a165f1af51e1ef7cd9f692 -r 12729ee9bbd5e18fb9f477aee8a5fc2c70a97452 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -51,6 +51,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',


diff -r 7c5ad85490e8ade384a165f1af51e1ef7cd9f692 -r 12729ee9bbd5e18fb9f477aee8a5fc2c70a97452 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -441,11 +441,13 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if reconstruct:
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
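
With the pull request merged, grid-edge reconstruction is opt-in via the configuration system. A sketch of enabling it from a script, using the ytcfg assignment form that appears elsewhere in this digest (dataset path hypothetical; values are strings, per yt/config.py):

    from yt.config import ytcfg
    ytcfg["yt", "reconstruct_hierarchy"] = "True"

    from yt.mods import load
    pf = load("DD0087/DD0087")  # edges re-derived from parents during hierarchy setup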
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b73d49ef9875/
changeset:   b73d49ef9875
branch:      yt
user:        ngoldbaum
date:        2012-08-23 04:43:33
summary:     Updating plot window to respect field display names.
affected #:  1 file

diff -r 12729ee9bbd5e18fb9f477aee8a5fc2c70a97452 -r b73d49ef9875f1a49e14ac2d05e1cdb3ebed3849 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -667,10 +667,12 @@
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
+            field_name = self.data_source.pf.field_info[f].display_name
+
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+f.encode('string-escape')+r'}$'
+                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
             else:
-                label = r'$\rm{'+f.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
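
One caveat: display_name defaults to None for fields that never declared one, so field_name.encode('string-escape') would raise AttributeError for most fields; presumably a fallback to the raw field name is wanted, along the lines of:

    field_name = self.data_source.pf.field_info[f].display_name
    if field_name is None:
        field_name = f  # fall back to the field's own name for the label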
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6ab3cf9ed978/
changeset:   6ab3cf9ed978
branch:      yt
user:        MatthewTurk
date:        2012-08-23 17:47:56
summary:     Adding set_axes_unit to the plot window.
affected #:  2 files

diff -r b73d49ef9875f1a49e14ac2d05e1cdb3ebed3849 -r 6ab3cf9ed978ae39cca57fbea9ce9e0b1798c956 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -110,3 +110,10 @@
         return "You have not declared yourself to be inside the IPython" + \
                "Notebook.  Do so with this command:\n\n" + \
                "ytcfg['yt','ipython_notebook'] = 'True'"
+
+class YTUnitNotRecognized(YTException):
+    def __init__(self, unit):
+        self.unit = unit
+
+    def __str__(self):
+        return "This parameter file doesn't recognize %s" % self.unit


diff -r b73d49ef9875f1a49e14ac2d05e1cdb3ebed3849 -r 6ab3cf9ed978ae39cca57fbea9ce9e0b1798c956 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -554,13 +554,47 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
+    _unit = None
+    @invalidate_plot
+    def set_axes_unit(self, unit_name):
+        r"""Set the unit for display on the x and y axes of the image.
+
+        Parameters
+        ----------
+        unit_name : string
+            A unit, available for conversion in the parameter file, that the
+            image extents will be displayed in.
+
+        Raises
+        ------
+        YTUnitNotRecognized
+            If the unit is not known, this will be raised.
+
+        Examples
+        --------
+
+        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p.show()
+        >>> p.set_axes_unit("kpc")
+        >>> p.show()
+        """
+        # blind except because it could be in conversion_factors or units
+        try:
+            self.pf[unit_name]
+        except KeyError: 
+            raise YTUnitNotRecognized(unit_name)
+        self._unit = unit_name
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        unit = get_smallest_appropriate_unit(x_width, self.pf)
+        if self._unit is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+        else:
+            unit = self._unit
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7a7c32a24138/
changeset:   7a7c32a24138
branch:      yt
user:        MatthewTurk
date:        2012-08-23 17:55:16
summary:     Adding an axes unit reset, by setting the unit_name to None for axes_units.
affected #:  1 file

diff -r 6ab3cf9ed978ae39cca57fbea9ce9e0b1798c956 -r 7a7c32a24138a2430741e8c39438856893d0fe3b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -563,7 +563,8 @@
         ----------
         unit_name : string
             A unit, available for conversion in the parameter file, that the
-            image extents will be displayed in.
+            image extents will be displayed in.  If set to None, any previous
+            units will be reset.
 
         Raises
         ------
@@ -577,12 +578,15 @@
         >>> p.show()
         >>> p.set_axes_unit("kpc")
         >>> p.show()
+        >>> p.set_axes_unit(None)
+        >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
         try:
             self.pf[unit_name]
         except KeyError: 
-            raise YTUnitNotRecognized(unit_name)
+            if unit_name is not None:
+                raise YTUnitNotRecognized(unit_name)
         self._unit = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
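
Minor nit: the comment still says "blind except" although only KeyError is caught, and the reset path works precisely because the self.pf[None] lookup raises KeyError and is swallowed. Usage, as in the docstring:

    p = ProjectionPlot(pf, "y", "Density")
    p.set_axes_unit("kpc")   # display image extents in kpc
    p.set_axes_unit(None)    # revert to the automatic unit choice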



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a374af7dc9d3/
changeset:   a374af7dc9d3
branch:      yt
user:        MatthewTurk
date:        2012-08-19 20:26:24
summary:     Merging in an old changeset for Reason
affected #:  1 file

diff -r 7c5ad85490e8ade384a165f1af51e1ef7cd9f692 -r a374af7dc9d3f5555530d1b73046adc30e3b8732 yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2c7cce8d50ed/
changeset:   2c7cce8d50ed
branch:      yt
user:        MatthewTurk
date:        2012-08-19 20:27:37
summary:     Merging the first pass at IO controller system, for staging IO on MPI nodes.
affected #:  3 files

diff -r a374af7dc9d3f5555530d1b73046adc30e3b8732 -r 2c7cce8d50ed4fb4459a03a465aabff97c56c885 yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextmanager import contextlib
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass


diff -r a374af7dc9d3f5555530d1b73046adc30e3b8732 -r 2c7cce8d50ed4fb4459a03a465aabff97c56c885 yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+# The rest of this module also relies on mylog, na (numpy), parallel_objects
+# and load; the explicit imports below are one way to satisfy those names.
+import numpy as na
+from contextlib import contextmanager
+from yt.funcs import mylog
+from yt.convenience import load
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return na.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except Exception: # ideally self.pf.hierarchy.io._read_exception
+            if fi.not_in_all:
+                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while True:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = na.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = na.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+


diff -r a374af7dc9d3f5555530d1b73046adc30e3b8732 -r 2c7cce8d50ed4fb4459a03a465aabff97c56c885 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
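
A sketch of how the staging machinery is driven from a user script (dataset
name and rank counts are illustrative; the script must be started under MPI
with yt's parallelism enabled):

    # e.g.: mpirun -np 32 python this_script.py --parallel
    from yt.mods import *
    from yt.utilities.parallel_tools.io_runner import io_nodes

    def total_mass(pf):
        dd = pf.h.all_data()
        return dd.quantities["TotalQuantity"]("CellMassMsun")

    # 8 ranks act as dedicated IO servers, 24 ranks do the analysis;
    # every rank receives the broadcast return value.
    q = io_nodes("DD0087/DD0087", 8, 24, total_mass)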



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a24cadac71b4/
changeset:   a24cadac71b4
branch:      yt
user:        jsoishi
date:        2012-08-23 20:09:32
summary:     Merged in MatthewTurk/yt (pull request #248)
affected #:  4 files

diff -r 7a7c32a24138a2430741e8c39438856893d0fe3b -r a24cadac71b410cb4b9f356f763249fa3df9a4bb yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);


diff -r 7a7c32a24138a2430741e8c39438856893d0fe3b -r a24cadac71b410cb4b9f356f763249fa3df9a4bb yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextlib import contextmanager
+# WorkSplitter.setup below also needs ProcessorPool.
+from .parallel_analysis_interface import ProcessorPool
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    # Declare the metaclass so the @abstractmethod markers below are
+    # enforced (Python 2 syntax).
+    __metaclass__ = ABCMeta
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass


diff -r 7a7c32a24138a2430741e8c39438856893d0fe3b -r a24cadac71b410cb4b9f356f763249fa3df9a4bb yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+# The rest of this module also relies on mylog, na (numpy), parallel_objects
+# and load; the explicit imports below are one way to satisfy those names.
+import numpy as na
+from contextlib import contextmanager
+from yt.funcs import mylog
+from yt.convenience import load
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return na.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except Exception: # ideally self.pf.hierarchy.io._read_exception
+            if fi.not_in_all:
+                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while True:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = na.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = na.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+


diff -r 7a7c32a24138a2430741e8c39438856893d0fe3b -r a24cadac71b410cb4b9f356f763249fa3df9a4bb yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c9020cec232a/
changeset:   c9020cec232a
branch:      yt
user:        MatthewTurk
date:        2012-08-24 17:23:59
summary:     Updating docstring for SlicePlot and ProjectionPlot to include keyword 'max'
affected #:  1 file

diff -r 7a7c32a24138a2430741e8c39438856893d0fe3b -r c9020cec232a59635c77f353d93fc7bd59028cfe yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -845,11 +845,12 @@
              or the axis name itself
         fields : string
              The name of the field(s) to be plotted.
-        center : two or three-element vector of sequence floats, 'c', or 'center'
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
             The coordinate of the center of the image.  If left blank,
             the image centers on the location of the maximum density
             cell.  If set to 'c' or 'center', the plot is centered on
-             the middle of the domain.
+             the middle of the domain.  If set to 'max', the plot is
+             centered on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -913,11 +914,12 @@
              or the axis name itself
         fields : string
             The name of the field(s) to be plotted.
-        center : A two or three-element vector of sequence floats, 'c', or 'center'
-            The coordinate of the center of the image.  If left blanck,
-            the image centers on the location of the maximum density
-            cell.  If set to 'c' or 'center', the plot is centered on
-            the middle of the domain.
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
+             The coordinate of the center of the image.  If left blank,
+             the image centers on the location of the maximum density
+             cell.  If set to 'c' or 'center', the plot is centered on
+             the middle of the domain.  If set to 'max', the plot is
+             centered on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
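
A short example of the documented keyword (dataset name is illustrative):

    from yt.mods import *
    pf = load("DD0087/DD0087")
    # Center the slice on the point of highest density.
    p = SlicePlot(pf, "x", "Density", center="max")
    p.save()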



https://bitbucket.org/yt_analysis/yt-3.0/changeset/467f57bae9aa/
changeset:   467f57bae9aa
branch:      yt
user:        MatthewTurk
date:        2012-08-24 17:24:53
summary:     Merge
affected #:  4 files

diff -r c9020cec232a59635c77f353d93fc7bd59028cfe -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);


diff -r c9020cec232a59635c77f353d93fc7bd59028cfe -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextlib import contextmanager
+# WorkSplitter.setup below also needs ProcessorPool.
+from .parallel_analysis_interface import ProcessorPool
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    # Declare the metaclass so the @abstractmethod markers below are
+    # enforced (Python 2 syntax).
+    __metaclass__ = ABCMeta
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass


diff -r c9020cec232a59635c77f353d93fc7bd59028cfe -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+# The rest of this module also relies on mylog, na (numpy), parallel_objects
+# and load; the explicit imports below are one way to satisfy those names.
+import numpy as na
+from contextlib import contextmanager
+from yt.funcs import mylog
+from yt.convenience import load
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return na.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except Exception: # ideally self.pf.hierarchy.io._read_exception
+            if fi.not_in_all:
+                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while True:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = na.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = na.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+


diff -r c9020cec232a59635c77f353d93fc7bd59028cfe -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/70fe4d11ef4f/
changeset:   70fe4d11ef4f
branch:      yt
user:        MatthewTurk
date:        2012-08-28 15:13:18
summary:     Fixing issue when display_name is None
affected #:  1 file

diff -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 -r 70fe4d11ef4f36c9fc98ed2018017a1b531c000d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -706,7 +706,7 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
-
+            if field_name is None: field_name = f
             if md['units'] == None or md['units'] == '':
                 label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
             else:
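
A sketch of the case this fixes (field definition is illustrative): a field
added without a display_name previously crashed when building the colorbar
label, and now falls back to the field name itself.

    from yt.mods import *
    def _DoubledDensity(field, data):
        return 2.0 * data["Density"]
    # No display_name is passed, so plots label this field "DoubledDensity".
    add_field("DoubledDensity", function=_DoubledDensity)
    pf = load("DD0087/DD0087")
    SlicePlot(pf, "x", "DoubledDensity").save()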



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0c59e6cdee28/
changeset:   0c59e6cdee28
branch:      yt
user:        MatthewTurk
date:        2012-08-30 19:15:09
summary:     Removing unused .sha512 file and adding alias between Total_Energy and
TotalEnergy.
affected #:  2 files

diff -r 70fe4d11ef4f36c9fc98ed2018017a1b531c000d -r 0c59e6cdee2852299047ff56e2a69f7e4515a279 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,7 +399,6 @@
 # Now we dump all our SHA512 files out.
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512


diff -r 70fe4d11ef4f36c9fc98ed2018017a1b531c000d -r 0c59e6cdee2852299047ff56e2a69f7e4515a279 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -183,6 +183,12 @@
           display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
+def _TotalEnergy(field, data):
+    return data["Total_Energy"] / _convertEnergy(data)
+add_field("TotalEnergy", function=_TotalEnergy,
+          display_name = "\rm{Total}\/\rm{Energy}",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+
 def _NumberDensity(field, data):
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
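
With the alias in place, scripts using either spelling should see the same
values; a quick sketch (dataset name is illustrative):

    from yt.mods import *
    pf = load("DD0087/DD0087")
    dd = pf.h.all_data()
    # "TotalEnergy" is now served from the on-disk "Total_Energy" data.
    assert (dd["TotalEnergy"] == dd["Total_Energy"]).all()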



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9763846e979a/
changeset:   9763846e979a
branch:      yt
user:        caseywstark
date:        2012-08-28 23:55:17
summary:     Draft version of the GDF writer. I tested to make sure it works with tests/DD0010.
affected #:  1 file

diff -r 467f57bae9aa354b03872f7f9b02587ce4d9bad8 -r 9763846e979adb2d8df11e82a0dba81443e3924c yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,169 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    #g.attrs["num_ghost_zones"] = pf...
+    # @todo: Where is this in the yt API?
+    #g.attrs["field_ordering"] = pf...
+    # @todo: not yet supported by yt.
+    #g.attrs["boundary_conditions"] = pf...
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Do we need to loop over the grids for this?
+    f["grid_parent_id"] = -1
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()
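
Usage is a single call; a sketch against the dataset mentioned in the commit
message (the path and metadata strings are illustrative):

    from yt.mods import *
    from yt.utilities.grid_data_format.writer import write_to_gdf

    pf = load("tests/DD0010/DD0010")
    write_to_gdf(pf, "DD0010.gdf",
                 data_author="Casey W. Stark",
                 data_comment="GDF export of DD0010")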



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5641531f7c9d/
changeset:   5641531f7c9d
branch:      yt
user:        caseywstark
date:        2012-08-28 23:56:53
summary:     Added __version__ string to the yt package-level __init__.py
affected #:  1 file

diff -r 9763846e979adb2d8df11e82a0dba81443e3924c -r 5641531f7c9d3b024a0fbe1a21f1472f8f1d4691 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"
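
Downstream scripts can probe the new attribute defensively; a sketch (the
fallback string is illustrative):

    try:
        from yt import __version__ as yt_version
    except ImportError:
        # releases before this change ship no version attribute
        yt_version = "pre-2.5"
    print "running against yt", yt_version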



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a7570ca33e16/
changeset:   a7570ca33e16
branch:      yt
user:        MatthewTurk
date:        2012-08-29 14:52:12
summary:     Merged in caseywstark/yt-gdf-writer (pull request #252)
affected #:  2 files

diff -r 70fe4d11ef4f36c9fc98ed2018017a1b531c000d -r a7570ca33e168bbf7b9a9f4f399582b38632cc0a yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"


diff -r 70fe4d11ef4f36c9fc98ed2018017a1b531c000d -r a7570ca33e168bbf7b9a9f4f399582b38632cc0a yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,169 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    #g.attrs["num_ghost_zones"] = pf...
+    # @todo: Where is this in the yt API?
+    #g.attrs["field_ordering"] = pf...
+    # @todo: not yet supported by yt.
+    #g.attrs["boundary_conditions"] = pf...
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Do we need to loop over the grids for this?
+    f["grid_parent_id"] = -1
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9c119e2bbb44/
changeset:   9c119e2bbb44
branch:      yt
user:        MatthewTurk
date:        2012-08-30 19:15:18
summary:     Merging
affected #:  2 files

diff -r 0c59e6cdee2852299047ff56e2a69f7e4515a279 -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"


diff -r 0c59e6cdee2852299047ff56e2a69f7e4515a279 -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,169 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    #g.attrs["num_ghost_zones"] = pf...
+    # @todo: Where is this in the yt API?
+    #g.attrs["field_ordering"] = pf...
+    # @todo: not yet supported by yt.
+    #g.attrs["boundary_conditions"] = pf...
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Do we need to loop over the grids for this?
+    f["grid_parent_id"] = -1
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/edc2b99212bd/
changeset:   edc2b99212bd
branch:      yt
user:        mqk
date:        2012-08-09 21:56:19
summary:     Introduced universal fields for spherical coordinates (sph_r, sph_theta, sph_phi) and for cylindrical coordinates (cyl_R, cyl_z, cyl_theta). These rely on field_parameters 'normal' and 'center'. The latter is now set in AMRCylinderBase instead of 'height_vector'. Also the old fields DiskAngle and Height are the same as sph_theta and cyl_z, respectively, so I've redefined those to use the new fields.
affected #:  3 files

diff -r b6c9c207677cbbd6e1c36d51189634797e002c6d -r edc2b99212bd100a4357083f3f365d0bf17a57c7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3074,7 +3074,7 @@
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
         self._d = -1.0 * na.dot(self._norm_vec, self.center)


diff -r b6c9c207677cbbd6e1c36d51189634797e002c6d -r edc2b99212bd100a4357083f3f365d0bf17a57c7 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -220,7 +220,7 @@
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
+        if param in ['bulk_velocity', 'center', 'normal']:
             return na.random.random(3) * 1e-2
         else:
             return 0.0


diff -r b6c9c207677cbbd6e1c36d51189634797e002c6d -r edc2b99212bd100a4357083f3f365d0bf17a57c7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -217,50 +217,181 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The spherical coordinates radius is simply the magnitude of the
+    ## coords vector.
+
+    return na.sqrt(na.sum(coords**2,axis=-1))
+
+def _Convert_sph_r_CGS(data):
+   return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The angle (theta) with respect to the normal (J), is the arccos
+    ## of the dot product of the normal with the normalized coords
+    ## vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JdotCoords = na.sum(J*coords,axis=-1)
+    
+    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+    
+    ## We have freedom with respect to what axis (xprime) to define
+    ## the disk angle. Here I've chosen to use the axis that is
+    ## perpendicular to the normal and the y-axis. When normal ==
+    ## y-hat, then set xprime = z-hat. With this definition, when
+    ## normal == z-hat (as is typical), then xprime == x-hat.
+    ##
+    ## The angle is then given by the arctan of the ratio of the
+    ## yprime-component and the xprime-component of the coords vector.
+
+    xprime = na.cross([0.0,1.0,0.0],normal)
+    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
+    yprime = na.cross(normal,xprime)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = na.tile(xprime,tile_shape)
+    Jy = na.tile(yprime,tile_shape)
+    
+    Px = na.sum(Jx*coords,axis=-1)
+    Py = na.sum(Jy*coords,axis=-1)
+    
+    return na.arctan2(Py,Px)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The cross product of the normal (J) with the coords vector
+    ## gives a vector of magnitude equal to the cylindrical radius.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JcrossCoords = na.cross(J,coords)
+    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+
+def _Convert_cyl_R_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The dot product of the normal (J) with the coords vector gives
+    ## the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    return na.sum(J*coords,axis=-1)  
+
+def _Convert_cyl_z_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+### [This is identical to the spherical coordinate's 'phi' angle.]
+def _cyl_theta(field, data):
+    return data['sph_phi']
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / na.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return na.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/na.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return na.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
 
 def _DynamicalTime(field, data):
     """



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d55d789c548f/
changeset:   d55d789c548f
branch:      yt
user:        MatthewTurk
date:        2012-08-31 13:08:38
summary:     Merged in mqk/yt_clean (pull request #240)
affected #:  3 files

diff -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e -r d55d789c548f33b563121fe511cd689fc36f8e9a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3077,7 +3077,7 @@
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
         self._d = -1.0 * na.dot(self._norm_vec, self.center)


diff -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e -r d55d789c548f33b563121fe511cd689fc36f8e9a yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -220,7 +220,7 @@
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
+        if param in ['bulk_velocity', 'center', 'normal']:
             return na.random.random(3) * 1e-2
         else:
             return 0.0


diff -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e -r d55d789c548f33b563121fe511cd689fc36f8e9a yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -217,50 +217,181 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The spherical coordinates radius is simply the magnitude of the
+    ## coords vector.
+
+    return na.sqrt(na.sum(coords**2,axis=-1))
+
+def _Convert_sph_r_CGS(data):
+   return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The angle (theta) with respect to the normal (J), is the arccos
+    ## of the dot product of the normal with the normalized coords
+    ## vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JdotCoords = na.sum(J*coords,axis=-1)
+    
+    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+    
+    ## We have freedom with respect to what axis (xprime) to define
+    ## the disk angle. Here I've chosen to use the axis that is
+    ## perpendicular to the normal and the y-axis. When normal ==
+    ## y-hat, then set xprime = z-hat. With this definition, when
+    ## normal == z-hat (as is typical), then xprime == x-hat.
+    ##
+    ## The angle is then given by the arctan of the ratio of the
+    ## yprime-component and the xprime-component of the coords vector.
+
+    xprime = na.cross([0.0,1.0,0.0],normal)
+    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
+    yprime = na.cross(normal,xprime)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = na.tile(xprime,tile_shape)
+    Jy = na.tile(yprime,tile_shape)
+    
+    Px = na.sum(Jx*coords,axis=-1)
+    Py = na.sum(Jy*coords,axis=-1)
+    
+    return na.arctan2(Py,Px)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The cross product of the normal (J) with the coords vector
+    ## gives a vector of magnitude equal to the cylindrical radius.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JcrossCoords = na.cross(J,coords)
+    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+
+def _Convert_cyl_R_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The dot product of the normal (J) with the coords vector gives
+    ## the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    return na.sum(J*coords,axis=-1)  
+
+def _Convert_cyl_z_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+### [This is identical to the spherical coordinate's 'phi' angle.]
+def _cyl_theta(field, data):
+    return data['sph_phi']
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / na.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return na.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/na.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return na.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
 
 def _DynamicalTime(field, data):
     """



https://bitbucket.org/yt_analysis/yt-3.0/changeset/57cc327cb898/
changeset:   57cc327cb898
branch:      yt
user:        ngoldbaum
date:        2012-08-31 03:51:12
summary:     Fixing a sign error in pixel_scale()
affected #:  1 file

diff -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e -r 57cc327cb89845570867307066d8b803f68e3dd5 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -80,11 +80,11 @@
     def pixel_scale(self,plot):
         x0, x1 = plot.xlim
         xx0, xx1 = plot._axes.get_xlim()
-        dx = (xx0 - xx1)/(x1 - x0)
+        dx = (xx1 - xx0)/(x1 - x0)
         
         y0, y1 = plot.ylim
         yy0, yy1 = plot._axes.get_ylim()
-        dy = (yy0 - yy1)/(y1 - y0)
+        dy = (yy1 - yy0)/(y1 - y0)
 
         return (dx,dy)
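
A quick numeric check of the corrected sign, with hypothetical window values:

    x0, x1 = 0.25, 0.75    # data-space window (plot.xlim)
    xx0, xx1 = 0.0, 1.0    # axes-space limits (plot._axes.get_xlim())
    dx = (xx1 - xx0) / (x1 - x0)   # 2.0, as expected
    # the old (xx0 - xx1) numerator gave -2.0, mirroring every overlay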
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7b0508f05dcc/
changeset:   7b0508f05dcc
branch:      yt
user:        ngoldbaum
date:        2012-08-31 05:23:22
summary:     Making the grids callback rely on plot coordinates rather than pixel coordinates.  This fixes the annotate=True option.
affected #:  1 file

diff -r 57cc327cb89845570867307066d8b803f68e3dd5 -r 7b0508f05dccf7996bee669c82d1b802417fd2c1 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -312,39 +312,36 @@
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
-        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         xi = x_dict[plot.data.axis]
         yi = y_dict[plot.data.axis]
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
+        (dx, dy) = self.pixel_scale(plot)
+        (xpix, ypix) = plot.image._A.shape
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
-        if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
-        else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+        #if self.periodic:
+        #    pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+        #else:
+        pxs, pys = na.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
+            left_edge_x = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_y = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+            if visible.nonzero()[0].size == 0: continue
             verts = na.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+                [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
+                 (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
-            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
-            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
@@ -352,8 +349,8 @@
             plot._axes.add_collection(grid_collection)
             if self.annotate:
                 ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_px)):
-                    plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
+                for n in range(len(left_edge_x)):
+                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n])
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):
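
The callback now maps grid edges from data space into axes space before making the pixel-based visibility cut; a standalone sketch of that arithmetic with hypothetical numbers:

    import numpy as na

    x0, x1 = 0.0, 1.0     # data-space window
    xx0, xx1 = 0.0, 1.0   # axes-space limits
    xpix, min_pix = 800, 1
    dx = (xx1 - xx0) / (x1 - x0)
    GLE_x = na.array([0.400, 0.49900])   # hypothetical grid left edges
    GRE_x = na.array([0.450, 0.49950])   # hypothetical grid right edges
    left_edge_x = (GLE_x - x0) * dx + xx0
    right_edge_x = (GRE_x - x0) * dx + xx0
    visible = xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > min_pix
    # -> [True, False]: the second grid spans only 0.4 pixels and is culled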



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9a5c39e1b2fb/
changeset:   9a5c39e1b2fb
branch:      yt
user:        ngoldbaum
date:        2012-08-31 05:25:49
summary:     Renaming the annotate option of the grids callback to draw_ids.  This is to avoid confusion with the name of the callback in the plot window interface.
affected #:  1 file

diff -r 7b0508f05dccf7996bee669c82d1b802417fd2c1 -r 9a5c39e1b2fb4697b1af9eebc3831bc4eddec8e0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -295,18 +295,18 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, draw_ids=False, periodic=True):
         """
-        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+        annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
-        *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
-        self.annotate = annotate # put grid numbers in the corner.
+        self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
     def __call__(self, plot):
@@ -347,7 +347,7 @@
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
-            if self.annotate:
+            if self.draw_ids:
                 ids = [g.id for g in plot.data._grids]
                 for n in range(len(left_edge_x)):
                     plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n])
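
From the plot-window side the callback is exposed as annotate_grids; a hedged usage sketch, assuming p is an existing plot-window plot:

    p.annotate_grids(alpha=0.5, min_pix=1, draw_ids=True)
    p.save()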



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a038fe48d289/
changeset:   a038fe48d289
branch:      yt
user:        ngoldbaum
date:        2012-08-31 05:29:30
summary:     Turning on the logic for periodic boundary conditions in the grid callback.
affected #:  1 file

diff -r 9a5c39e1b2fb4697b1af9eebc3831bc4eddec8e0 -r a038fe48d28983883e24dad24ecd16a7b63458bc yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -321,10 +321,10 @@
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
-        #if self.periodic:
-        #    pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
-        #else:
-        pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+        if self.periodic:
+            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+        else:
+            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
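
For reference, the mgrid expression enumerates the 3x3 set of periodic image offsets:

    import numpy as na

    pxs, pys = na.mgrid[-1:1:3j, -1:1:3j]
    # pxs.ravel() -> [-1. -1. -1.  0.  0.  0.  1.  1.  1.]
    # pys.ravel() -> [-1.  0.  1. -1.  0.  1. -1.  0.  1.]
    # each (px_off, py_off) pair shifts the grid edges by one domain width,
    # so grids straddling a periodic boundary are drawn on both sides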



https://bitbucket.org/yt_analysis/yt-3.0/changeset/59cea3bf549c/
changeset:   59cea3bf549c
branch:      yt
user:        ngoldbaum
date:        2012-08-31 08:37:59
summary:     Making it so that only visible grids have id annotations drawn.
affected #:  1 file

diff -r a038fe48d28983883e24dad24ecd16a7b63458bc -r 59cea3bf549cc864b3a28148fbad3e4287fa0355 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -349,7 +349,7 @@
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
                 ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_x)):
+                for n in visible.nonzero()[0]:
                     plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n])
             plot._axes.hold(False)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b2e45258e0fb/
changeset:   b2e45258e0fb
branch:      yt
user:        ngoldbaum
date:        2012-08-31 08:40:51
summary:     Fixing a bug in the set_width method of the plot window that caused it to crash when supplied with a (width, unit) tuple
affected #:  1 file

diff -r 59cea3bf549cc864b3a28148fbad3e4287fa0355 -r b2e45258e0fb56b439e53f0fc145077f0f09356d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -364,7 +364,7 @@
         """
         if iterable(width) and isinstance(width[1],str):
             unit = width[1]
-            width = width[0]
+            width = (width[0],width[0])
         elif not iterable(width):
             width = (width,width)
         Wx, Wy = width



https://bitbucket.org/yt_analysis/yt-3.0/changeset/00fe4774d2c6/
changeset:   00fe4774d2c6
branch:      yt
user:        ngoldbaum
date:        2012-08-31 08:47:07
summary:     Updating set_width to accept tuples of (width, unit) tuples
affected #:  1 file

diff -r b2e45258e0fb56b439e53f0fc145077f0f09356d -r 00fe4774d2c6ec73747384f4a94c620f0b51e418 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -354,25 +354,48 @@
 
         parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed, for example
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = (width[0],width[0])
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
         centerx = (self.xlim[1] + self.xlim[0])/2 
         centery = (self.ylim[1] + self.ylim[0])/2 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
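
The four accepted forms as a usage sketch, assuming p is a hypothetical plot-window plot:

    p.set_width((10, 'kpc'))                  # 10 kpc along both axes
    p.set_width(((10, 'kpc'), (15, 'kpc')))   # 10 kpc in x, 15 kpc in y
    p.set_width(0.2)                          # code units, both axes
    p.set_width((0.2, 0.3))                   # code units, x and y separately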



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bdde37db29a8/
changeset:   bdde37db29a8
branch:      yt
user:        ngoldbaum
date:        2012-08-31 08:50:51
summary:     Updating the draw_ids keyword to respect the clip box for the plot.  If this isn't set, ids will be drawn for grids whose lower left hand corners fall outside the plot window.
affected #:  1 file

diff -r 00fe4774d2c6ec73747384f4a94c620f0b51e418 -r bdde37db29a88f8ba3720afda6fca0de49996951 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -350,7 +350,7 @@
             if self.draw_ids:
                 ids = [g.id for g in plot.data._grids]
                 for n in visible.nonzero()[0]:
-                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n])
+                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n],clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/327b4358244a/
changeset:   327b4358244a
branch:      yt
user:        MatthewTurk
date:        2012-08-31 13:22:00
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #255)
affected #:  2 files

diff -r d55d789c548f33b563121fe511cd689fc36f8e9a -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -80,11 +80,11 @@
     def pixel_scale(self,plot):
         x0, x1 = plot.xlim
         xx0, xx1 = plot._axes.get_xlim()
-        dx = (xx0 - xx1)/(x1 - x0)
+        dx = (xx1 - xx0)/(x1 - x0)
         
         y0, y1 = plot.ylim
         yy0, yy1 = plot._axes.get_ylim()
-        dy = (yy0 - yy1)/(y1 - y0)
+        dy = (yy1 - yy0)/(y1 - y0)
 
         return (dx,dy)
 
@@ -295,30 +295,29 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, draw_ids=False, periodic=True):
         """
-        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+        annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
-        *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
-        self.annotate = annotate # put grid numbers in the corner.
+        self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
-        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         xi = x_dict[plot.data.axis]
         yi = y_dict[plot.data.axis]
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
+        (dx, dy) = self.pixel_scale(plot)
+        (xpix, ypix) = plot.image._A.shape
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
@@ -331,29 +330,27 @@
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
+            left_edge_x = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_y = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+            if visible.nonzero()[0].size == 0: continue
             verts = na.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+                [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
+                 (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
-            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
-            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
-            if self.annotate:
+            if self.draw_ids:
                 ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_px)):
-                    plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
+                for n in visible.nonzero()[0]:
+                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n],clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):


diff -r d55d789c548f33b563121fe511cd689fc36f8e9a -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -354,25 +354,48 @@
 
         parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed, for example
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = width[0]
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
         centerx = (self.xlim[1] + self.xlim[0])/2 
         centery = (self.ylim[1] + self.ylim[0])/2 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8e9b059aacc1/
changeset:   8e9b059aacc1
branch:      yt
user:        scopatz
date:        2012-08-31 18:02:02
summary:     na is dead, long live np
affected #:  112 files
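
The change itself is mechanical; every module swaps the nonstandard na alias for the conventional np:

    import numpy as np    # previously: import numpy as na

    x = np.zeros(8)       # previously: na.zeros(8)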

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
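
Restating the code above as formulas: with nudop the Doppler width in Hz, the damping parameter is a = gamma / (4 * pi * nudop), the dimensionless offset is x = (nu - nu1) / nudop, and the optical depth profile is tau(nu) = tau0 * voigt(a, x).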
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)
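
np.digitize, used here to locate each line center on the wavelength grid, returns the bin index each value falls into; a small sketch with a hypothetical grid:

    import numpy as np

    lambda_bins = np.linspace(1000.0, 1100.0, 11)   # hypothetical angstrom grid
    centers = np.array([1005.0, 1095.0])
    print np.digitize(centers, lambda_bins)         # [ 1 10]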


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)
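
The loop these hunks touch is a plain secant iteration on comoving
distance. A minimal standalone sketch of the same scheme, assuming a
comoving_distance(z2, z) callable that returns the distance between the
two redshifts in the same units as target_distance (an assumption here,
standing in for cosmology.ComovingRadialDistance, not the yt API):

import numpy as np

def deltaz_forward(z, target_distance, comoving_distance,
                   d_tolerance=1e-4, max_iterations=100):
    # Initial guess: step 10% down in redshift.
    z1 = z
    z2 = z1 - 0.1
    distance1 = 0.0
    distance2 = comoving_distance(z2, z)
    iteration = 1
    while np.fabs(distance2 - target_distance) / distance2 > d_tolerance:
        # Secant update: linearize distance(z) through the last two
        # guesses and solve for where it crosses target_distance.
        m = (distance2 - distance1) / (z2 - z1)
        z1, distance1 = z2, distance2
        z2 = (target_distance - distance2) / m + z2
        distance2 = comoving_distance(z2, z)
        iteration += 1
        if iteration > max_iterations:
            break
    return np.fabs(z2 - z)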


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
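
A quick usage check of common_volume after the rename, with
illustrative numbers (module path taken from the diff header above):

import numpy as np
from yt.analysis_modules.cosmological_observation.light_cone.common_n_volume \
    import common_volume

cube1 = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
cube2 = np.array([[0.5, 1.5], [0.5, 1.5], [0.5, 1.5]])
# The cubes overlap by 0.5 along each axis, so the common
# 3-volume is 0.5**3 = 0.125.
print(common_volume(cube1, cube2))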
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos if the box width fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to the
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,
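
The edge bookkeeping above repeats one pattern per axis: any halo whose
circle crosses a box edge gets a copy shifted by the box width to the
far side. A condensed sketch of that pattern for one axis (a
hypothetical helper, not this module's API):

import numpy as np

def add_periodic_copies(x, r, width):
    # Halos poking past the right (left) edge get a copy shifted
    # left (right) by the box width so the mask tiles periodically.
    # Hypothetical helper; mirrors the add_x/add2_x logic above.
    right = x + r > width
    left = x - r < 0
    x_out = np.concatenate([x, x[right] - width, x[left] + width])
    r_out = np.concatenate([r, r[right], r[left]])
    return x_out, r_out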


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()
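
One hunk above converts the flux normalization; written out, it is just
the inverse-square law applied per pixel. A worked sketch, with all
values made up for illustration:

import numpy as np

boxSizeProper = 3.0e25                      # proper box size in cm (illustrative)
pixels = 800
dL = 1.0e28                                 # luminosity distance in cm (illustrative)
pixelarea = (boxSizeProper / pixels)**2     # proper cm^2 per pixel
# Scales each pixel to erg/s/cm^2/Hz on the observer's image plane.
factor = pixelarea / (4.0 * np.pi * dL**2)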


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to the other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
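
The tiling loop converted at the top of this file just replicates the
projection on an integer grid wide enough to cover box_width_fraction;
for example:

import numpy as np

box_width_fraction = 1.6                    # illustrative value
ntiles = int(np.ceil(box_width_fraction))   # 2 tiles per axis here
px = np.array([0.25, 0.75])
tiled_px = np.concatenate([px + x for x in range(ntiles)])
# tiled_px -> [0.25, 0.75, 1.25, 1.75]; cells past the width get
# wrapped back around by the periodic fix-up that follows.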
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
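
An aside on the converted log10 call in this file: fail_digits is only
a zero-padding width for the failure counter, e.g.:

import numpy as np

failures = 5000
fail_digits = str(int(np.log10(failures)) + 1)   # '4'
print(("%0" + fail_digits + "d") % 37)           # prints 0037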
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate length of shortest vector between to points in periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments
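
A quick check of periodic_distance after the rename (the function
assumes a unit box via dim = np.ones(...); module path taken from the
diff header above):

import numpy as np
from yt.analysis_modules.cosmological_observation.light_ray.light_ray \
    import periodic_distance

a = np.array([0.1, 0.1, 0.1])
b = np.array([0.9, 0.9, 0.9])
# Direct separation is 0.8 per axis; wrapping shortens it to 0.2 per
# axis, so the result is sqrt(3 * 0.2**2) ~= 0.3464.
print(periodic_distance(a, b))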


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
         # neglecting to check if the 4 particles in the same plane,
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # Apparently the NaNs sometimes get turned into infs, which
         # messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,

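For readers skimming the rename above, the _parse_output hunk is a good example of the sorted-tags grouping idiom this file leans on. Below is a minimal, self-contained sketch with invented toy data, written for modern Python/NumPy; note that np.indices(shape).ravel() in the diff is simply np.arange(size) in this 1-D case.

    import numpy as np

    # `tags` holds a group ID per particle; -1 means "in no group".
    # Sorting once and slicing by per-group counts yields each group's
    # particle indices without a per-particle Python loop.
    tags = np.array([2, -1, 0, 2, 0, 1, -1, 2])

    unique_ids = np.unique(tags)
    counts = np.bincount(tags + 1)      # +1 shifts the -1 tag into bin 0
    sort_indices = np.argsort(tags)
    grab_indices = np.arange(tags.size)[sort_indices]

    cp = 0
    for i in unique_ids:
        cp_c = cp + counts[i + 1]
        if i == -1:
            cp = cp_c
            continue                     # skip the ungrouped particles
        print(i, grab_indices[cp:cp_c])  # e.g. 2 -> [0 3 7]
        cp = cp_c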

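The bulk-velocity hunk above uses a related segmented-sum idiom: sort per-particle data by group ID, mark where the sorted IDs change, then reduce each contiguous run with one np.sum per group. A small sketch with toy numbers follows; np.nonzero stands in for the shorter-boolean-mask indexing in the diff, which newer NumPy versions reject.

    import numpy as np

    subchain = np.array([1, 0, 1, 2, 0, 1])    # group ID per particle
    vel = np.array([[1.], [2.], [3.], [4.], [5.], [6.]])  # toy data
    calc = subchain.size

    sort = subchain.argsort()
    vel = vel[sort]
    sort_subchain = subchain[sort]
    uniq_subchain = np.unique(sort_subchain)
    # Indices where the sorted group ID changes bound each group's run.
    marks = np.nonzero(np.ediff1d(sort_subchain) > 0)[0] + 1
    marks = np.concatenate(([0], marks, [calc]))
    bulk = np.empty((uniq_subchain.size, vel.shape[1]), dtype='float64')
    for i, u in enumerate(uniq_subchain):
        bulk[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
    print(bulk)   # per-group sums: [[7.], [10.], [4.]]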
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,7 +25,7 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Setup the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region, but within one padding of the boundary,
         # and this will do it.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unfortunately, this has to be a dict, because any task can have
@@ -449,10 +449,10 @@
         # as the full number of particles.
         # We can skip this the first two times around.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping the all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -502,9 +502,9 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] =np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the holes that are modulo mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     

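The corner-neighbor test near the top of this diff folds separations back through the periodic box before comparing against max_padding. A minimal sketch of that minimum-image distance, with made-up coordinates:

    import numpy as np

    period = np.array([1.0, 1.0, 1.0])   # domain width per axis

    def periodic_distance(a, b, period):
        # Per-axis separation, folded through the periodic boundary.
        rad = np.fabs(a - b)
        rad = np.minimum(rad, period - rad)
        return np.sqrt((rad * rad).sum())

    my_vertex = np.array([0.05, 0.5, 0.5])
    vertex = np.array([0.95, 0.5, 0.5])
    print(periodic_distance(my_vertex, vertex, period))   # ~0.1, not 0.9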

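And the final hunk's helper grows its sentinel-filled chain arrays in 10000-element blocks only when an assignment lands past the end. The same pattern in isolation, with the `type` argument renamed to `dtype`:

    import numpy as np

    def add_to_array(arr, key, value, dtype):
        # Extend with one block of -1 sentinels if `key` is out of range.
        try:
            arr[key] = value
        except IndexError:
            arr = np.concatenate((arr, np.ones(10000, dtype=dtype) * -1))
            arr[key] = value
        return arr

    arr = np.ones(10000, dtype='float64') * -1
    arr = add_to_array(arr, 12345, 3.14, 'float64')  # one growth step
    print(arr.size, arr[12345])                      # 20000 3.14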
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) mass (Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1
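
The refinement loop above implements a composite trapezoid rule on progressively finer log-spaced grids, repeating until successive estimates agree to within *error*. A minimal sketch of the same idea follows; the function name and the fixed 0.9 offset are illustrative only (the code above also walks the offset toward 1 so the lower bound approaches zero), and note that np.trapz does accept an x= argument for unevenly spaced samples:

import numpy as np

def integrate_to_convergence(fcn, error=1e-6, initial_guess=10):
    # Composite trapezoid rule on a log-spaced grid; extend the upper
    # bound and refine the sampling until the estimate stabilizes.
    upper, npts, area_last = initial_guess, initial_guess + 1, None
    while True:
        xvals = np.logspace(0, np.log10(upper), npts) - 0.9
        area = np.trapz(fcn(xvals), x=xvals)  # handles the uneven dx
        if area_last is not None and abs(area - area_last) < error:
            return area
        area_last, upper, npts = area, upper * 10, npts * 10

print(integrate_to_convergence(lambda x: np.exp(-x)))  # ~exp(-0.1) ~ 0.905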


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendant fractions and ancestry fractions.
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me
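
The parentage measure above reduces to the size of the intersection of two particle-ID arrays. With made-up IDs:

import numpy as np

parent_ids = np.array([1, 2, 3, 4, 5])     # particles in this halo
child_ids = np.array([3, 4, 5, 6, 7, 8])   # particles in the candidate child
overlap = np.intersect1d(parent_ids, child_ids).size
print(float(overlap) / child_ids.size)     # 0.5 of the child came from me
print(float(overlap) / parent_ids.size)    # 0.6 of me ended up in the child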


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -174,7 +174,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -350,11 +350,11 @@
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
             # Turn it into fortran.
-            child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
+            child_points = np.array(child_points)
+            fKD.pos = np.asfortranarray(child_points.T)
+            fKD.qv = np.empty(3, dtype='float64')
+            fKD.dist = np.empty(NumNeighbors, dtype='float64')
+            fKD.tags = np.empty(NumNeighbors, dtype='int64')
             fKD.nn = NumNeighbors
             fKD.sort = True
             fKD.rearrange = True
@@ -373,7 +373,7 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                fKD.qv = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
                 find_nn_nearest_neighbors()
@@ -400,7 +400,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This allows us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -450,9 +450,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = np.array([], dtype='int64')
+            parent_masses = np.array([], dtype='float64')
+            parent_halos = np.array([], dtype='int32')
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,10 +460,10 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs = np.concatenate((parent_IDs, thisIDs))
+                        parent_masses = np.concatenate((parent_masses, thisMasses))
+                        parent_halos = np.concatenate((parent_halos, 
+                            np.ones(thisIDs.size, dtype='int32') * gID))
                         del thisIDs, thisMasses
                     h5fp.close()
             
@@ -477,14 +477,14 @@
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
         
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = np.array([], dtype='int64')
+        child_masses = np.array([], dtype='float64')
+        child_halos = np.array([], dtype='int32')
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,10 +492,10 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs = np.concatenate((child_IDs, thisIDs))
+                    child_masses = np.concatenate((child_masses, thisMasses))
+                    child_halos = np.concatenate((child_halos, 
+                        np.ones(thisIDs.size, dtype='int32') * gID))
                     del thisIDs, thisMasses
                 h5fp.close()
         
@@ -504,7 +504,7 @@
         child_IDs = child_IDs[sort]
         child_masses = child_masses[sort]
         child_halos = child_halos[sort]
-        child_send = na.ones(child_IDs.size, dtype='bool')
+        child_send = np.ones(child_IDs.size, dtype='bool')
         del sort
         
         # Match particles in halos.
@@ -618,8 +618,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]
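
np.in1d(a, b, assume_unique=True) returns a boolean mask over a marking membership in b, which is what lets _match pare both particle lists down to the common IDs in one vectorized step. A toy illustration:

import numpy as np

parent_IDs = np.array([10, 11, 12, 13], dtype='int64')   # sorted, unique
parent_halos = np.array([0, 0, 1, 1], dtype='int32')     # halo tag per particle
child_IDs = np.array([11, 13, 14], dtype='int64')

parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique=True)
print(parent_halos[parent_in_child])  # [0 1]: one matched particle per halo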


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):
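
The virialization test above scans the overdensity profile from the outermost bin inward and stops at the first pair of bins that bracket virial_overdensity; the quantities in *fields* are then interpolated between those two bins (in log space when use_log is set). The bracketing search in isolation, with synthetic numbers:

import numpy as np

overDensity = np.array([900., 500., 260., 180., 90.])  # outermost bin last
virial_overdensity = 200.0

index = None
for q in np.arange(len(overDensity), 0, -1) - 1:
    # First bin (scanning from the outside in) that drops below the
    # threshold while its inner neighbor is still above it.
    if overDensity[q] < virial_overdensity and \
            overDensity[q - 1] >= virial_overdensity:
        index = q - 1
        break
print(index)  # 2: bins 2 and 3 bracket the crossing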


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -684,7 +684,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
                                                                                  lazy_reader=True)
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -845,7 +845,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1076,7 +1076,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1171,7 +1171,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1179,7 +1179,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1207,7 +1207,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1215,7 +1215,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1300,17 +1300,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
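
One easy-to-miss rename in this file is in the bulk-velocity fix, where np.unravel_index turns the flat cell index *maxi* returned by the MaxLocation quantity into an (i, j, k) tuple for the parent grid. For reference, on a hypothetical grid shape:

import numpy as np

ActiveDimensions = (4, 4, 4)
density = np.random.random(ActiveDimensions)
maxi = np.argmax(density)                           # flat (raveled) index
max_cell = np.unravel_index(maxi, ActiveDimensions) # (i, j, k)
assert density[max_cell] == density.max()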
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modification.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]
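
return_bottom_clumps sorts clumps by their minimum density and then reverses, so the densest clumps come first. The indexing pattern on its own:

import numpy as np

min_density = np.array([3.0, 0.5, 7.2, 1.1])  # one value per clump
order = np.argsort(min_density)               # ascending: [1 3 0 2]
print(order[::-1])                            # [2 0 3 1], densest clump first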


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",
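
identify_contours labels cells grid by grid with provisional IDs, records (id, id) pairs for every join discovered across grid boundaries, and then coalesces those pairs into final contours. coalesce_join_tree itself is not shown in this diff; grouping such join pairs is a connected-components problem, and a minimal union-find sketch of that step looks like:

def coalesce(pairs):
    # Union-find over contour-id join pairs; returns {root: set of ids}.
    parent = {}
    def find(a):
        parent.setdefault(a, a)
        while parent[a] != a:
            parent[a] = parent[parent[a]]  # path halving
            a = parent[a]
        return a
    for a, b in pairs:
        ra, rb = find(a), find(b)
        if ra != rb:
            parent[max(ra, rb)] = min(ra, rb)  # keep the smaller id as root
    groups = {}
    for a in list(parent):
        groups.setdefault(find(a), set()).add(a)
    return groups

print(coalesce([(1, 1), (2, 2), (3, 3), (1, 2)]))  # 1 and 2 merge; 3 stays alone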


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation!
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]
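
_interpolate_value converts each sample point to spherical coordinates about the center, wrapping np.arctan2's [-pi, pi) convention onto [0, 2*pi) before digitizing radii into the surface bins. The coordinate conversion in isolation, with hypothetical points and bins:

import numpy as np

center = np.array([0.5, 0.5, 0.5])
pos = np.array([[0.6, 0.4, 0.5],
                [0.3, 0.7, 0.9]])
vec = pos - center
phi = np.arctan2(vec[:, 1], vec[:, 0])
phi[phi < 0] += 2 * np.pi                 # wrap onto [0, 2*pi)
r = np.sqrt(np.sum(vec * vec, axis=1))
theta = np.arccos(vec[:, 2] / r)
bins = np.linspace(0.01, 0.5, 8)          # radial surface bins
print(np.digitize(r, bins))               # surface bin index per point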


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
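
_get_interpolator digitizes the requested energy bounds into the table's energy grid, integrates that slice with np.trapz, and clips before taking log10 so that empty bins do not become -inf. The same pipeline on a random table (the shapes and energy range here are hypothetical, and the 2.41799e17 eV-to-Hz conversion from the diff is omitted):

import numpy as np

table = np.random.random((16, 16, 64))        # (density, temperature, energy)
ev_vals = np.logspace(-1.0, 2.0, table.shape[-1])
ev_min, ev_max = 0.5, 7.0
e_is, e_ie = np.digitize([ev_min, ev_max], ev_vals)
bin_table = np.trapz(table[..., e_is - 1:e_ie],
                     x=ev_vals[e_is - 1:e_ie], axis=-1)
bin_table = np.log10(bin_table.clip(1e-80, bin_table.max()))
print(bin_table.shape)                        # (16, 16), ready to interpolate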


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if len(sub) == 0: return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.
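
The star formation rate machinery above is, at its core, np.digitize of creation times into time bins plus a per-bin mass sum and a cumulative sum. Condensed, with synthetic stars:

import numpy as np

star_mass = np.array([1e5, 2e5, 5e4, 3e5])          # Msun
creation_time = np.array([0.10, 0.15, 0.40, 0.80])  # code time
# Stretch the lower edge slightly, as the code above does, to avoid
# numerical issues with the oldest star.
time_bins = np.linspace(creation_time.min() * 0.99, 1.0, 5)
inds = np.digitize(creation_time, time_bins) - 1
mass_bins = np.zeros(len(time_bins), dtype='float64')
for index in np.unique(inds):
    mass_bins[index] += star_mass[inds == index].sum()
cum_mass_bins = np.cumsum(mass_bins)                # mass formed up to each bin
assert abs(cum_mass_bins[-1] - star_mass.sum()) < 1e-6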


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -78,14 +78,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -153,7 +153,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -178,7 +178,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -211,7 +211,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -220,13 +220,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -246,11 +246,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -332,7 +332,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -340,7 +340,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + vertex*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
                     subgrid,hilbert_child,output,refined,levels,grids,level+1,
@@ -381,17 +381,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -402,7 +402,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -411,7 +411,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -437,12 +437,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and
@@ -450,41 +450,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until we are nwide cells span
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -497,14 +497,14 @@
         dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -525,8 +525,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -540,7 +540,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -570,7 +570,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -698,14 +698,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -722,14 +722,14 @@
     if scene_distance is None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -745,11 +745,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -763,11 +763,11 @@
 use this to multiply two matrices; it will think that you're
 trying to multiply by a set of vectors and all hell will break
 loose."""
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -775,14 +775,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -791,25 +791,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=np.float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -826,7 +826,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the y (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -843,7 +843,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -851,19 +851,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -912,9 +912,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(np.radians(angle)),0.,np.sin(np.radians(angle))]
+    vc  = [np.cos(np.radians(90-angle)),0.,np.sin(np.radians(90-angle))]
     cameraset_ring['%02i'%angle]=(pos,vc)
             
 

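For reference, the matmul helper in the hunk above applies a matrix either
to a single vector or to a set of row vectors, which is why it transposes
twice. A minimal usage sketch (numpy imported as np; the rotation matrix
here is illustrative, not taken from the changeset):

    import numpy as np

    def matmul(m, v):
        # same logic as the helper in the diff above
        assert type(v) is not np.matrix
        v = np.asarray(v)
        m, vs = [np.asmatrix(a) for a in (m, v)]
        result = np.asarray(np.transpose(m * np.transpose(vs)))
        if len(v.shape) == 1:
            return result[0]  # single vector in -> shape (3,) out
        return result         # N row vectors in -> shape (N, 3) out

    # 90 degree rotation about the z axis
    rot = np.array([[0., -1., 0.],
                    [1.,  0., 0.],
                    [0.,  0., 1.]])
    print matmul(rot, [1., 0., 0.])                   # [ 0.  1.  0.]
    print matmul(rot, [[1., 0., 0.], [0., 1., 0.]])   # rotates both rows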

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Wrap (modulo the period) so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is what is effectively happening below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_edges is either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
         multi = 1
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, this finds the sum over all the
         elements leaving the dimension dim untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi

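A note on the pair generation in the hunk above: drawing phi uniformly on
[0, 2*pi) and theta as arccos(2v - 1) is the standard way to get directions
distributed uniformly on the sphere, since it makes cos(theta) uniform on
[-1, 1] instead of bunching points at the poles. A self-contained sketch of
the same sampling (names here are illustrative, not yt API):

    import numpy as np

    def random_pairs(left_edge, right_edge, period, length, size, mt):
        # random first points inside the subvolume
        r1 = np.empty((size, 3), dtype='float64')
        for dim in range(3):
            r1[:, dim] = mt.uniform(low=left_edge[dim],
                                    high=right_edge[dim], size=size)
        phi = mt.uniform(low=0., high=2. * np.pi, size=size)
        theta = np.arccos(2. * mt.uniform(low=0., high=1., size=size) - 1.)
        # second points at distance `length` along the random direction
        r2 = np.empty_like(r1)
        r2[:, 0] = r1[:, 0] + length * np.cos(phi) * np.sin(theta)
        r2[:, 1] = r1[:, 1] + length * np.sin(phi) * np.sin(theta)
        r2[:, 2] = r1[:, 2] + length * np.cos(theta)
        r2 %= np.asarray(period)  # wrap into the periodic volume
        return r1, r2

    mt = np.random.mtrand.RandomState(1234)
    r1, r2 = random_pairs([0.]*3, [1.]*3, [1.]*3, 0.05, 1000, mt)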

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)

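The helper in yt/arraytypes.py above builds an empty record array from a
dict of names and formats. A short usage sketch (the field names below are
made up for illustration):

    import numpy as np
    import numpy.core.records as rec

    desc = {'names': ['level', 'density'],
            'formats': ['int32', 'float64']}
    arrays = [np.zeros(8, dtype=atype) for atype in desc['formats']]
    grids = rec.fromarrays(arrays, **desc)
    print grids['level'], grids['density']  # two zeroed columns, length 8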

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 

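The yt/convenience.py hunk above is the same one-line change as the rest of
this series: the old "import numpy as na" alias becomes the conventional
"np". A word-boundary-aware rewrite along these lines would do the
mechanical part (a sketch, not the script actually used for these commits):

    import re, sys

    pattern = re.compile(r'\bna\.')  # match 'na.' only as a whole token

    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        src = src.replace('import numpy as na', 'import numpy as np')
        # note: this also rewrites 'na.' inside strings and comments,
        # which is exactly what the diffs above show happening.
        src = pattern.sub('np.', src)
        with open(path, 'w') as f:
            f.write(src)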

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -29,7 +29,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -74,9 +74,9 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_grid_state(func):
     """
@@ -181,13 +181,13 @@
         if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tempx = na.abs(self['x'] - center[0])
-                tempx = na.minimum(tempx, self.DW[0] - tempx)
-                tempy = na.abs(self['y'] - center[1])
-                tempy = na.minimum(tempy, self.DW[1] - tempy)
-                tempz = na.abs(self['z'] - center[2])
-                tempz = na.minimum(tempz, self.DW[2] - tempz)
-                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
+                tempx = np.abs(self['x'] - center[0])
+                tempx = np.minimum(tempx, self.DW[0] - tempx)
+                tempy = np.abs(self['y'] - center[1])
+                tempy = np.minimum(tempy, self.DW[1] - tempy)
+                tempz = np.abs(self['z'] - center[2])
+                tempz = np.minimum(tempz, self.DW[2] - tempz)
+                tr = np.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.field_data[field]
@@ -235,14 +235,14 @@
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -250,7 +250,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -376,7 +376,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -421,11 +421,11 @@
         return grids
 
     def select_grid_indices(self, level):
-        return na.where(self.grid_levels == level)
+        return np.where(self.grid_levels == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
-            self.__grid_left_edge = na.array([g.LeftEdge for g in self._grids])
+            self.__grid_left_edge = np.array([g.LeftEdge for g in self._grids])
         return self.__grid_left_edge
 
     def __del_grid_left_edge(self):
@@ -441,7 +441,7 @@
 
     def __get_grid_right_edge(self):
         if self.__grid_right_edge == None:
-            self.__grid_right_edge = na.array([g.RightEdge for g in self._grids])
+            self.__grid_right_edge = np.array([g.RightEdge for g in self._grids])
         return self.__grid_right_edge
 
     def __del_grid_right_edge(self):
@@ -457,7 +457,7 @@
 
     def __get_grid_levels(self):
         if self.__grid_levels == None:
-            self.__grid_levels = na.array([g.Level for g in self._grids])
+            self.__grid_levels = np.array([g.Level for g in self._grids])
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +474,7 @@
 
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
-            self.__grid_dimensions = na.array([g.ActiveDimensions for g in self._grids])
+            self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
         return self.__grid_dimensions
 
     def __del_grid_dimensions(self):
@@ -516,13 +516,13 @@
             if field not in self.hierarchy.field_list and not in_grids:
                 if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
             if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
-                self._sortkey = na.argsort(self[self.sort_by])
+                self._sortkey = np.argsort(self[self.sort_by])
             # We *always* sort the field here if we have not successfully
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
@@ -581,7 +581,7 @@
 
     def _get_list_of_grids(self):
         # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+        y = np.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
                     & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
                     & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
                     & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
@@ -604,10 +604,10 @@
         else:
             sl = self._cut_masks[grid.id]
         if not iterable(grid[field]):
-            gf = grid[field] * na.ones(grid.child_mask[sl].shape)
+            gf = grid[field] * np.ones(grid.child_mask[sl].shape)
         else:
             gf = grid[field][sl]
-        return gf[na.where(grid.child_mask[sl])]
+        return gf[np.where(grid.child_mask[sl])]
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
@@ -646,10 +646,10 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
@@ -659,7 +659,7 @@
         # Get the value of the line at each LeftEdge and RightEdge
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        p = np.zeros(self.pf.h.num_grids, dtype='bool')
         # Check left faces first
         for i in range(3):
             i1 = (i+1) % 3
@@ -670,10 +670,10 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
+        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+                & np.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+                & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
     def _get_line_at_coord(self, v, index):
@@ -684,24 +684,24 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                        grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        self._dts[grid.id] = np.abs(dts)
+        self._ts[grid.id] = np.abs(ts)
         return mask
 
 class AMRStreamlineBase(AMR1DData):
@@ -745,11 +745,11 @@
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2] # final point has no successor; reuse previous segment length
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -760,14 +760,14 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
@@ -775,13 +775,13 @@
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1) 
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -842,8 +842,8 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = na.array([])
-            else: data = na.concatenate(data)
+            if len(data) == 0: data = np.array([])
+            else: data = np.concatenate(data)
             temp_data[field] = data
             # Now the next field can use this field
             self[field] = temp_data[field] 
@@ -891,7 +891,7 @@
 
         >>> proj = pf.h.proj(0, "Density")
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         if center is None:
             center = self.get_field_parameter("center")
@@ -944,11 +944,11 @@
         """
         import yt.utilities.delaunay as de
         if log_spacing:
-            zz = na.log10(self[field])
+            zz = np.log10(self[field])
         else:
             zz = self[field]
-        xi, yi = na.array( \
-                 na.mgrid[LE[0]:RE[0]:side*1j, \
+        xi, yi = np.array( \
+                 np.mgrid[LE[0]:RE[0]:side*1j, \
                           LE[1]:RE[1]:side*1j], 'float64')
         zi = de.Triangulation(self['px'],self['py']).nn_interpolator(zz)\
                  [LE[0]:RE[0]:side*1j, \
@@ -1082,7 +1082,7 @@
             points = None
             t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
-            points = na.concatenate(points)
+            points = np.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
@@ -1124,27 +1124,27 @@
         nx = grid.child_mask.shape[xaxis]
         ny = grid.child_mask.shape[yaxis]
         mask = self.__cut_mask_child_mask(grid)[sl]
-        cm = na.where(mask.ravel()== 1)
-        cmI = na.indices((nx,ny))
+        cm = np.where(mask.ravel()== 1)
+        cmI = np.indices((nx,ny))
         ind = cmI[0, :].ravel()   # xind
         npoints = cm[0].shape
         # create array of "npoints" ones that will be reused later
-        points = na.ones(npoints, 'float64')
+        points = np.ones(npoints, 'float64')
         # calculate xpoints array
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
         del cmI   # no longer needed 
-        t = na.vstack( (t, points * ind[cm] * dy + \
+        t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
         del ind, cm   # no longer needed
         # calculate zpoints array
-        t = na.vstack((t, points * self.coord))
+        t = np.vstack((t, points * self.coord))
         # calculate dx array
-        t = na.vstack((t, points * dx * 0.5))
+        t = np.vstack((t, points * dx * 0.5))
         # calculate dy array
-        t = na.vstack((t, points * dy * 0.5))
+        t = np.vstack((t, points * dy * 0.5))
         # return [xpoints, ypoints, zpoints, dx, dy] as (5, npoints) array
         return t.swapaxes(0, 1)
 
@@ -1169,7 +1169,7 @@
             dv = self.hierarchy.io._read_data_slice(grid, field, self.axis, sl_ind) * conv_factor
         else:
             dv = grid[field]
-            if dv.size == 1: dv = na.ones(grid.ActiveDimensions)*dv
+            if dv.size == 1: dv = np.ones(grid.ActiveDimensions)*dv
             dv = dv[sl]
         mask = self.__cut_mask_child_mask(grid)[sl]
         dataVals = dv.ravel()[mask.ravel() == 1]
@@ -1251,11 +1251,11 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
@@ -1276,7 +1276,7 @@
         # @todo: Convert to using corners
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+        vertices = np.array([[LE[:,0],LE[:,1],LE[:,2]],
                              [RE[:,0],RE[:,1],RE[:,2]],
                              [LE[:,0],LE[:,1],RE[:,2]],
                              [RE[:,0],RE[:,1],LE[:,2]],
@@ -1285,27 +1285,27 @@
                              [LE[:,0],RE[:,1],LE[:,2]],
                              [RE[:,0],LE[:,1],RE[:,2]]])
         # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
         self.D = D
         self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+            np.where(np.logical_not(np.all(D<0,axis=0) | np.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
-        D = na.ones(ss) * self._d
+        D = np.ones(ss) * self._d
         x = grid.LeftEdge[0] + grid.dds[0] * \
-                (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
         y = grid.LeftEdge[1] + grid.dds[1] * \
-                (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
         z = grid.LeftEdge[2] + grid.dds[2] * \
-                (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
-        cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
+        diag_dist = np.sqrt(np.sum(grid.dds**2.0))
+        cm = (np.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
 
     def _generate_coords(self):
@@ -1313,12 +1313,12 @@
         for grid in self._get_grids():
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
-        else: points = na.concatenate(points)
+        else: points = np.concatenate(points)
         t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
-        self['px'] = na.dot(pos, self._x_vec)
-        self['py'] = na.dot(pos, self._y_vec)
-        self['pz'] = na.dot(pos, self._norm_vec)
+        self['px'] = np.dot(pos, self._x_vec)
+        self['py'] = np.dot(pos, self._y_vec)
+        self['pz'] = np.dot(pos, self._norm_vec)
         self['pdx'] = t[:,3] * 0.5
         self['pdy'] = t[:,3] * 0.5
         self['pdz'] = t[:,3] * 0.5
@@ -1326,14 +1326,14 @@
     def _generate_grid_coords(self, grid):
         pointI = self._get_point_indices(grid)
         coords = [grid[ax][pointI].ravel() for ax in 'xyz']
-        coords.append(na.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
-        return na.array(coords).swapaxes(0,1)
+        coords.append(np.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
+        return np.array(coords).swapaxes(0,1)
 
     def _get_data_from_grid(self, grid, field):
         if not self.pf.field_info[field].particle_type:
             pointI = self._get_point_indices(grid)
             if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions)
+                t = grid[field] * np.ones(grid.ActiveDimensions)
                 return t[pointI].ravel()
             return grid[field][pointI].ravel()
         else:
@@ -1344,10 +1344,10 @@
 
     @cache_point_indices
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _gen_node_name(self):
         cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
@@ -1391,7 +1391,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -1435,34 +1435,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
         
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is False:
             self._refresh_data()
@@ -1479,11 +1479,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -1491,12 +1491,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -1512,7 +1512,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -1538,27 +1538,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         self._refresh_data()
         return
@@ -1584,7 +1584,7 @@
                     continue # A "True" return means we did it
             if not self._vc_data.has_key(field):
                 self._vc_data[field] = {}
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
@@ -1686,9 +1686,9 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
@@ -1743,7 +1743,7 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_dls(self, grid, fields):
@@ -1755,8 +1755,8 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
+        dls = np.array(dls)
+        convs = np.array(convs)
         if self.proj_style == "mip":
             dls[:] = 1.0
             convs[:] = 1.0
@@ -1822,14 +1822,14 @@
                 ds = gs[0].dds[0]
             else:
                 ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
+            dxs.append(np.ones(nvals.shape[0], dtype='float64') * ds)
+        coord_data = np.concatenate(coord_data, axis=0).transpose()
+        field_data = np.concatenate(field_data, axis=0).transpose()
         if self._weight is None:
             dls, convs = self._get_dls(self._grids[0], fields)
             field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        weight_data = np.concatenate(weight_data, axis=0).transpose()
+        dxs = np.concatenate(dxs, axis=0).transpose()
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = dxs
@@ -1843,7 +1843,7 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
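
The finalizer above keeps all fields row-stacked in a single array and then
recovers them with np.vsplit, which cuts along the first axis into equal
blocks, one per field. In miniature (values invented for illustration):

    import numpy as np

    fields = ['Density', 'Temperature']
    stacked = np.arange(8.0).reshape(4, 2)       # two rows per field
    per_field = np.vsplit(stacked, len(fields))  # two (2, 2) blocks
    print(per_field[0].ravel())                  # [0. 1. 2. 3.]
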
@@ -1853,7 +1853,7 @@
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
         if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -1873,16 +1873,16 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            used_points = np.logical_or.reduce(used_data, self.axis)
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
+                      for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
+        to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 
@@ -1894,8 +1894,8 @@
         if len(grids_to_initialize) == 0: return
         pbar = get_pbar('Initializing tree % 2i / % 2i' \
                           % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
+        start_index = np.empty(2, dtype="int64")
+        dims = np.empty(2, dtype="int64")
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
         for pi, grid in enumerate(grids_to_initialize):
@@ -1920,7 +1920,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2024,7 +2024,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -2083,7 +2083,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -2112,12 +2112,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -2142,7 +2142,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -2169,8 +2169,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids cause this
                     # to round up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -2213,8 +2213,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
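
The rounding comment a few hunks above deserves a concrete illustration:
cell widths that should differ by an exact refinement factor can disagree at
machine precision, and plain truncation then yields the wrong ratio. A toy
example:

    import numpy as np

    dds_coarse = 1.0 / 64
    dds_fine = (1.0 / 64) / 2 * (1 + 2e-16)  # epsilon-perturbed fine width
    ratio = dds_coarse / dds_fine            # just under 2.0
    print(int(ratio))                        # truncates to 1
    print(int(np.rint(ratio)))               # rounds to the intended 2
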
@@ -2225,10 +2225,10 @@
                 del self.__overlap_masks[grid.id]
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -2244,7 +2244,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -2260,7 +2260,7 @@
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -2278,18 +2278,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -2300,7 +2300,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2367,30 +2367,30 @@
         >>> print fproj["Density"]
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
 
     def _generate_coords(self):
@@ -2398,9 +2398,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
@@ -2422,7 +2422,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)
@@ -2483,10 +2483,10 @@
             if ( (i%100) == 0):
                 mylog.info("Working on % 7i / % 7i", i, len(self._grids))
             grid.set_field_parameter("center", self.center)
-            points.append((na.ones(
+            points.append((np.ones(
                 grid.ActiveDimensions,dtype='float64')*grid['dx'])\
                     [self._get_point_indices(grid)])
-            t = na.concatenate([t,points])
+            t = np.concatenate([t,points])
             del points
         self['dx'] = t
         #self['dy'] = t
@@ -2496,8 +2496,8 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
-        tr = na.array([grid['x'][pointI].ravel(), \
+        dx = np.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
+        tr = np.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
                 grid["RadiusCode"][pointI].ravel(),
@@ -2533,7 +2533,7 @@
                 if self._generate_field(field):
                     continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
@@ -2545,21 +2545,21 @@
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
             # int64 -> float64 with the first real set of data
-            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
-                return na.array([f[i,:][pointI] for i in range(3)])
+                return np.array([f[i,:][pointI] for i in range(3)])
             if self._is_fully_enclosed(grid): return grid[field].ravel()
             return grid[field][pointI].ravel()
         if field in self.pf.field_info and self.pf.field_info[field].vector_field:
             pointI = self._get_point_indices(grid)
             f = grid[field]
-            return na.array([f[i,:][pointI] for i in range(3)])
+            return np.array([f[i,:][pointI] for i in range(3)])
         else:
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
-                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+                tr = tr * np.ones(grid.ActiveDimensions, dtype='float64')
             if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
                 and self._is_fully_enclosed(grid):
                 return tr.ravel()
@@ -2579,19 +2579,19 @@
             if grid.has_key(field):
                 new_field = grid[field]
             else:
-                new_field = na.ones(grid.ActiveDimensions, dtype=dtype) * default_val
+                new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
             new_field[pointI] = self[field][i:i+np]
             grid[field] = new_field
             i += np
 
     def _is_fully_enclosed(self, grid):
-        return na.all(self._get_cut_mask)
+        return np.all(self._get_cut_mask)
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _get_cut_particle_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -2600,9 +2600,9 @@
         return self._get_cut_mask(fake_grid)
 
     def _get_particle_indices(self, grid):
-        k = na.zeros(grid.NumberOfParticles, dtype='bool')
+        k = np.zeros(grid.NumberOfParticles, dtype='bool')
         k = (k | self._get_cut_particle_mask(grid))
-        return na.where(k)
+        return np.where(k)
 
     def cut_region(self, field_cuts):
         """
@@ -2705,16 +2705,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -2818,7 +2818,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -2835,10 +2835,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -2867,7 +2867,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -2951,36 +2951,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
 
@@ -2992,16 +2992,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -3018,7 +3018,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -3056,7 +3056,7 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -3076,35 +3076,35 @@
         within the cylinder will be selected.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._refresh_data()
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3115,13 +3115,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
 
@@ -3138,8 +3138,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
@@ -3150,11 +3150,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -3172,7 +3172,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
             
 
@@ -3185,7 +3185,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge, 
                               grid.RightEdge, grid.dds, pm, 0)
@@ -3228,7 +3228,7 @@
                                                            self.right_edge)
 
     def _is_fully_enclosed(self, grid):
-        return na.all( (grid._corners <= self.right_edge)
+        return np.all( (grid._corners <= self.right_edge)
                      & (grid._corners >= self.left_edge))
 
     @cache_mask
@@ -3282,10 +3282,10 @@
 
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
         self._refresh_data()
-        self.offsets = (na.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
+        self.offsets = (np.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
                         (self.pf.domain_right_edge -
                          self.pf.domain_left_edge)[:,None,None,None])\
                        .transpose().reshape(27,3) # cached and in order
@@ -3300,7 +3300,7 @@
                            self.left_edge[1]+off_y,self.left_edge[2]+off_z]
             region_right = [self.right_edge[0]+off_x,
                             self.right_edge[1]+off_y,self.right_edge[2]+off_z]
-            if (na.all((grid._corners <= region_right) &
+            if (np.all((grid._corners <= region_right) &
                        (grid._corners >= region_left))):
                 return True
         return False
@@ -3310,7 +3310,7 @@
         if self._is_fully_enclosed(grid):
             return True
         else:
-            cm = na.zeros(grid.ActiveDimensions,dtype='bool')
+            cm = np.zeros(grid.ActiveDimensions,dtype='bool')
             dxp, dyp, dzp = self._dx_pad * grid.dds
             for off_x, off_y, off_z in self.offsets:
                 cm = cm | ( (grid['x'] - dxp + off_x < self.right_edge[0])
@@ -3350,7 +3350,7 @@
         Child cells are not returned.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._grids = na.array(grid_list)
+        self._grids = np.array(grid_list)
         self.grid_list = self._grids
 
     def _get_list_of_grids(self):
@@ -3361,13 +3361,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class AMRMaxLevelCollection(AMR3DData):
@@ -3394,13 +3394,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 
@@ -3441,14 +3441,14 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = na.abs(grid._corners - self.center)
-        r = na.minimum(r, na.abs(self.DW[None,:]-r))
-        corner_radius = na.sqrt((r**2.0).sum(axis=1))
-        return na.all(corner_radius <= self.radius)
+        r = np.abs(grid._corners - self.center)
+        r = np.minimum(r, np.abs(self.DW[None,:]-r))
+        corner_radius = np.sqrt((r**2.0).sum(axis=1))
+        return np.all(corner_radius <= self.radius)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3477,7 +3477,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -3488,12 +3488,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -3505,7 +3505,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -3535,7 +3535,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -3545,18 +3545,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # these vrdote#_2 terms find the product of the vr components with e#,
         # square the results,
         # keep the smallest (periodic case),
         # and sum them
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
 
@@ -3572,21 +3572,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take non-cubical root grid tiles into account
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
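
The stacked cases above (ar, ar + DW, ar - DW, keeping whichever is smallest
in magnitude) are the minimum-image convention for periodic domains. A
standalone sketch:

    import numpy as np

    def minimum_image(ar, DW):
        # Of the three candidate separations along a periodic axis of
        # width DW, keep the one smallest in magnitude.
        case = np.array([ar, ar + DW, ar - DW])
        index = np.abs(case).argmin(axis=0)
        return np.choose(index, case)

    # With DW = 1.0, a raw separation of 0.9 maps to -0.1:
    print(minimum_image(np.array([0.9, -0.8, 0.2]), 1.0))
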
@@ -3627,22 +3627,22 @@
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        self.ActiveDimensions = np.array(dims,dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -3650,14 +3650,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields=None):
         if self._grids is None:
@@ -3677,7 +3677,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -3689,9 +3689,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
             
@@ -3737,7 +3737,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -3832,7 +3832,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -3846,35 +3846,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -3944,7 +3944,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
     
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -3969,7 +3969,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -3997,7 +3997,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -4054,13 +4054,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
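
The bulk of this changeset, as in the hunks above and below, is a mechanical
rename of the old "import numpy as na" alias to the community-standard "np".
A hypothetical sketch of how such a rename could be automated (the actual
commits may well have been produced differently):

    import re
    from pathlib import Path

    ALIAS = re.compile(r"\bna\.")  # 'na.' used as a module prefix
    IMPORT = re.compile(r"^import numpy as na\b", re.MULTILINE)

    def migrate(path):
        src = Path(path).read_text()
        src = IMPORT.sub("import numpy as np", src)
        # A bare regex can over-match (e.g. an attribute that happens to
        # be named 'na'), so the rewritten file should still be diffed
        # and reviewed by hand.
        src = ALIAS.sub("np.", src)
        Path(path).write_text(src)
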


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -100,7 +100,7 @@
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): self.retvals[i].append(rv[i])
             g.clear_data()
-        self.retvals = [na.array(self.retvals[i]) for i in range(self.n_ret)]
+        self.retvals = [np.array(self.retvals[i]) for i in range(self.n_ret)]
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
@@ -110,7 +110,7 @@
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
@@ -185,7 +185,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
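
Each derived quantity above is split into a per-chunk function and a combine
function: the first returns partial sums for one grid, the second reduces
the collected partials. A self-contained miniature of the same pattern
(names and values are illustrative, not yt's API):

    import numpy as np

    def com_partial(x, mass):
        # Per-chunk: weighted coordinate sum plus total weight.
        return (x * mass).sum(), mass.sum()

    def com_combine(xs, masses):
        # Across chunks: ratio of the summed partials.
        return np.array(xs).sum() / np.array(masses).sum()

    chunks = [(np.array([0.1, 0.2]), np.array([1.0, 3.0])),
              (np.array([0.8]), np.array([2.0]))]
    partials = [com_partial(x, m) for x, m in chunks]
    xs, masses = zip(*partials)
    print(com_combine(xs, masses))  # ~0.3833
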
 
@@ -218,7 +218,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -249,9 +249,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -268,17 +268,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -292,11 +292,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -360,15 +360,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -380,7 +380,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -402,26 +402,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Fix the coverage to match to root grid cell left 
         # edges for making indexes.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
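
A few hunks above, the wrap-around test sorts the cell coordinates along
each axis and looks for a gap of at least two root-grid cells, the signature
of a clump split across a periodic boundary. A toy version with invented
values:

    import numpy as np

    coords = np.sort(np.array([0.02, 0.05, 0.93, 0.97]))
    two_root = 2.0 / 16                  # two cells on a 16^3 root grid
    diff = coords[1:] - coords[:-1]
    sel = diff >= two_root
    if sel.any():
        index = np.min(np.nonzero(sel))  # position of the wrap-around gap
        print("periodic wrap detected after", coords[index])
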
@@ -429,12 +429,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -447,9 +447,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-            vals = na.array([local_data["CellMass"][sel]], order='F')
+            vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -484,7 +484,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -492,7 +492,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -569,7 +569,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -613,9 +613,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -644,14 +644,14 @@
     """
     ma, maxi, mx, my, mz, mg = -1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
         mg = data["GridIndices"][maxi]
     return (ma, maxi, mx, my, mz, mg)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 6)
@@ -663,14 +663,14 @@
     """
     ma, mini, mx, my, mz, mg = 1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
         mg = data["GridIndices"][mini]
     return (ma, mini, mx, my, mz, mg)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 6)
@@ -691,8 +691,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
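
All of the function/combine_function pairs registered above share one parallel-reduction shape: each processor reduces its own chunk of grids, and the combine step reduces the per-processor partial results. A minimal sketch of the MaxLocation-style reduction in plain numpy (no yt; the chunks are hypothetical stand-ins for per-processor grid data):

    import numpy as np

    def max_location(chunk):
        # per-chunk reduction, as in _MaxLocation
        i = np.argmax(chunk)
        return chunk[i], i

    def comb_max_location(results):
        # combine step, as in _combMaxLocation: keep the entry whose
        # local maximum is the global maximum
        vals = np.array([r[0] for r in results])
        return results[np.argmax(vals)]

    chunks = [np.random.random(16) for _ in range(4)]
    print(comb_max_location([max_location(c) for c in chunks]))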


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -151,8 +151,8 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -161,8 +161,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -180,12 +180,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -215,13 +215,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
-            return na.random.random(3) * 1e-2
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
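
The defaultdict machinery above is what lets yt "detect" which fields a derived field depends on: the detector hands back plausible fake data for any key and records the request. A stripped-down sketch of that idea (plain numpy; FakeFieldDetector is a hypothetical name, not the class in this file):

    import numpy as np
    from collections import defaultdict

    class FakeFieldDetector(defaultdict):
        # serves fake data for any field and records what was asked for
        def __init__(self, nd=16):
            defaultdict.__init__(self,
                lambda: np.ones((nd, nd, nd), dtype='float64')
                + 1e-4 * np.random.random((nd, nd, nd)))
            self.requested = []

        def __missing__(self, item):
            self.requested.append(item)
            return defaultdict.__missing__(self, item)

    fd = FakeFieldDetector()
    np.sqrt(fd["Density"] * fd["Temperature"])  # some derived-field body
    print(fd.requested)                         # ['Density', 'Temperature']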


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -79,11 +79,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -184,15 +184,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -209,14 +209,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -237,9 +237,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -278,19 +278,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -369,8 +369,8 @@
     def __fill_child_mask(self, child, mask, tofill):
         rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -383,7 +383,7 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'int32')
+        self._child_mask = np.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
@@ -398,7 +398,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -425,8 +425,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -462,7 +462,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -474,9 +474,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -486,17 +486,17 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
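
The eight shifted in-place adds in get_vertex_centered_data implement a cell-to-vertex average: every interior vertex ends up with the mean of the eight cell-centered values around it. A minimal numpy sketch of just that stencil, leaving out the log-space handling, ghost zones, and boundary extrapolation:

    import numpy as np

    cc = np.random.random((8, 8, 8))   # cell-centered field
    vc = np.zeros((9, 9, 9))           # vertex-centered result
    for sx in (slice(None, -1), slice(1, None)):
        for sy in (slice(None, -1), slice(1, None)):
            for sz in (slice(None, -1), slice(1, None)):
                vc[sx, sy, sz] += cc   # one of the eight shifted adds
    np.multiply(vc, 0.125, vc)         # in-place average, as above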


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle, pdb
 import weakref
 
@@ -116,11 +116,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _setup_classes(self, dd):
         # Called by subclass
@@ -172,7 +172,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -361,13 +361,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
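
The flat grid arrays allocated in _initialize_grid_arrays are what the per-level statistics reduce over; a small sketch of that counting step with hypothetical stand-in arrays (shapes as allocated above: (num_grids, 1) levels, (num_grids, 3) dimensions):

    import numpy as np

    grid_levels = np.array([[0], [1], [1], [2]], dtype='int32')
    grid_dimensions = np.array([[32, 32, 32], [16, 16, 16],
                                [16, 16, 16], [8, 8, 8]], dtype='int32')
    for level in range(grid_levels.max() + 1):
        li = (grid_levels[:, 0] == level)
        numgrids = np.sum(li)
        numcells = grid_dimensions[li, :].prod(axis=1).sum()
        print(level, numgrids, numcells)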


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -70,18 +70,18 @@
         max_val, maxi, mx, my, mz, mg = \
             source.quantities["MaxLocation"]( field, lazy_reader=True)
         max_grid = self.grids[mg]
-        mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
+        mc = np.unravel_index(maxi, max_grid.ActiveDimensions)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
               max_val, mx, my, mz, max_grid, max_grid.Level, mc)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_grid, mc, max_val, na.array((mx,my,mz), dtype='float64')
+        return max_grid, mc, max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -90,7 +90,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -103,11 +103,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -134,7 +134,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -151,15 +151,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -167,29 +167,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -203,26 +203,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -237,5 +237,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
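
The repeated np.choose calls in find_point and find_slice_grids build an interval test one comparison at a time, zeroing the mask for any grid the point falls outside of. A quick sketch with two hypothetical grids, alongside the equivalent single boolean reduction:

    import numpy as np

    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = np.array([0.25, 0.25, 0.25])

    mask = np.ones(len(grid_left_edge))
    for i in range(3):
        # drop grids whose left edge lies beyond the point ...
        np.choose(np.greater(grid_left_edge[:, i], coord[i]), (mask, 0), mask)
        # ... and grids whose right edge does not reach it
        np.choose(np.greater(grid_right_edge[:, i], coord[i]), (0, mask), mask)
    print(np.where(mask == 1))

    # the same selection as one boolean reduction:
    inside = np.all((grid_left_edge <= coord) & (coord < grid_right_edge), axis=1)
    print(np.where(inside))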
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
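
One idiom worth noting in the hunk above: np.fromiter materializes the per-grid conversion factors straight from a generator, without building an intermediate list. A tiny sketch with a hypothetical convert(grid):

    import numpy as np

    grid_list = range(5)        # stand-ins for grid objects

    def convert(g):             # hypothetical per-grid conversion factor
        return 1.0 + 0.1 * g

    factors = np.fromiter((convert(g) for g in grid_list),
                          count=len(grid_list), dtype='float64')
    print(factors)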
         


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -115,13 +115,13 @@
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = np.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -131,7 +131,7 @@
         for key in self.__data:
             my_mean[key] = self._get_empty_field()
             my_weight[key] = self._get_empty_field()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for key in self.__data:
             my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
             my_weight[key][ub] = self.__weight_data[key][ub]
@@ -151,7 +151,7 @@
                                          accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
-                q[u] = na.sqrt(q[u] / w[u])
+                q[u] = np.sqrt(q[u] / w[u])
             self[field] = f
             self["%s_std" % field] = q
         self["UsedBins"] = u
@@ -202,7 +202,7 @@
                 else:
                     pointI = self._data_source._get_point_indices(source)
             data.append(source[field][pointI].ravel().astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -235,10 +235,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -253,7 +253,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -263,7 +263,7 @@
         # (i.e., lazy_reader)
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -282,7 +282,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -293,7 +293,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -301,9 +301,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -319,7 +319,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -337,11 +337,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -409,18 +409,18 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -428,7 +428,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -436,7 +436,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -456,9 +456,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -470,9 +470,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -481,11 +481,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -507,8 +507,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -531,7 +531,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -540,7 +540,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -579,7 +579,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -599,7 +599,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -617,9 +617,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -656,24 +656,24 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -681,7 +681,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -689,9 +689,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -711,11 +711,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -727,7 +727,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -741,13 +741,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -772,9 +772,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -853,7 +853,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
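
The digitize-based binning at the heart of these profile classes reduces to a few lines of numpy. A sketch of the 1D core, assuming log-spaced edges from np.logspace as in __init__ and using np.bincount in place of the profile's accumulation loop:

    import numpy as np

    bins = np.logspace(-2, 2, 9)                     # bin *edges*
    data = 10.0 ** (np.random.random(1000) * 4 - 2)  # samples in range
    mi = (data > bins.min()) & (data < bins.max())   # truncate at boundaries
    bin_indices = np.digitize(data[mi], bins) - 1    # 0-based, as in _get_bins
    binned = np.bincount(bin_indices, weights=data[mi], minlength=bins.size)
    print(binned[:-1])                               # per-bin sums of the field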
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,7 +26,7 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
@@ -61,66 +61,66 @@
 
 def _dx(field, data):
     return data.dds[0]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
     return data.dds[1]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
     return data.dds[2]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           validators=[ValidateSpatial(0)],
           projection_conversion="unitary",
@@ -130,7 +130,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -139,7 +139,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -157,7 +157,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -169,7 +169,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -181,7 +181,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
              (data["y-velocity"]-bulk_velocity[1])**2.0 + \
              (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -189,13 +189,13 @@
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
 def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
+    return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
 add_field("TangentialVelocity", 
           function=_TangentialVelocity,
@@ -223,14 +223,14 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
     ## The spherical coordinates radius is simply the magnitude of the
     ## coords vector.
 
-    return na.sqrt(na.sum(coords**2,axis=-1))
+    return np.sqrt(np.sum(coords**2,axis=-1))
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,7 +245,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -254,11 +254,11 @@
     ## vector.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JdotCoords = na.sum(J*coords,axis=-1)
+    JdotCoords = np.sum(J*coords,axis=-1)
     
-    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,7 +269,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
     
@@ -282,18 +282,18 @@
     ## The angle is then given by the arctan of the ratio of the
     ## yprime-component and the xprime-component of the coords vector.
 
-    xprime = na.cross([0.0,1.0,0.0],normal)
-    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
-    yprime = na.cross(normal,xprime)
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = na.tile(xprime,tile_shape)
-    Jy = na.tile(yprime,tile_shape)
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
     
-    Px = na.sum(Jx*coords,axis=-1)
-    Py = na.sum(Jy*coords,axis=-1)
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
     
-    return na.arctan2(Py,Px)
+    return np.arctan2(Py,Px)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -305,7 +305,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -313,10 +313,10 @@
     ## gives a vector of magnitude equal to the cylindrical radius.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JcrossCoords = na.cross(J,coords)
-    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -331,7 +331,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -339,9 +339,9 @@
     ## the cylindrical height.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    return na.sum(J*coords,axis=-1)  
+    return np.sum(J*coords,axis=-1)  
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -399,7 +399,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -502,7 +502,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -520,7 +520,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -579,15 +579,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Now some fancy footwork
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -615,7 +615,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
@@ -627,12 +627,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -642,7 +642,7 @@
 def obtain_velocities(data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["x-velocity"] - bv[0]
     yv = data["y-velocity"] - bv[1]
     zv = data["z-velocity"] - bv[2]
@@ -694,18 +694,18 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
@@ -720,7 +720,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -730,7 +730,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -740,7 +740,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -788,20 +788,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
@@ -886,16 +886,16 @@
     center = data.get_field_parameter("center")
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
                 + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
                 + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
                 )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
+    if np.any(np.isnan(new_field)): # to fix center = point
+        new_field[np.isnan(new_field)] = 0.0
     return new_field
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
     return 1e-5
 add_field("RadialVelocity", function=_RadialVelocity,
@@ -916,10 +916,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -929,10 +929,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -955,16 +955,16 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
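
CICDeposit_3 is a compiled yt routine, so it cannot be shown inline; as a rough standalone stand-in, this hypothetical sketch deposits particle mass with nearest-grid-point weighting (simpler than cloud-in-cell, but the same bookkeeping of positions, a left edge, grid dimensions, and a cell width):

    import numpy as np

    def ngp_deposit(pos, mass, left_edge, dims, dx):
        blank = np.zeros(dims, dtype='float64')
        idx = np.floor((pos - left_edge) / dx).astype('int64')
        idx = np.clip(idx, 0, np.asarray(dims) - 1)  # keep particles in-bounds
        for (i, j, k), m in zip(idx, mass):
            blank[i, j, k] += m                      # all mass to the host cell
        return blank

    grid = ngp_deposit(np.random.rand(10, 3), np.ones(10),
                       np.zeros(3), (4, 4, 4), 0.25)
    assert np.isclose(grid.sum(), 10.0)              # deposition conserves mass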
@@ -993,7 +993,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -1018,7 +1018,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -1038,7 +1038,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -1053,7 +1053,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -1068,7 +1068,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -1083,7 +1083,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -1100,7 +1100,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -1115,7 +1115,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -1130,7 +1130,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1145,7 +1145,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1171,7 +1171,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1189,7 +1189,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1207,7 +1207,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1225,7 +1225,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1244,7 +1244,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1263,7 +1263,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1285,13 +1285,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1299,7 +1299,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1311,7 +1311,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1344,7 +1344,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1369,13 +1369,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1383,7 +1383,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1396,7 +1396,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -106,7 +106,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -120,10 +120,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -141,7 +141,7 @@
         #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
         
@@ -180,9 +180,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -192,10 +192,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -224,8 +224,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
@@ -234,7 +234,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -260,15 +260,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -298,8 +298,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -345,14 +345,14 @@
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    props*np.array(correction).astype('int64')))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -372,7 +372,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -388,13 +388,13 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -461,17 +461,17 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
             if type(self.pf.grid_particles) == type(5):
                 particle_level = min(self.pf.max_level,self.pf.grid_particles)
             else:
                 particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    np.zeros(len(pos[:,0])).astype('int32')-1,
                    particle_level, #don't grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
@@ -500,10 +500,10 @@
             
 
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -519,7 +519,7 @@
         self.max_level = self.grid_levels.max()
 
     # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     mask = np.empty(self.grids.size, dtype='int32')
     #     pb = get_pbar("Populating grids", len(self.grids))
     #     for gi,g in enumerate(self.grids):
     #         pb.update(gi)
@@ -609,7 +609,7 @@
         self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -685,7 +685,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -696,7 +696,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -730,8 +730,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -812,10 +812,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -824,7 +824,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -836,7 +836,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -857,10 +857,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -927,8 +927,8 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,7 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -178,7 +178,7 @@
     di = dd==0.0
     #dd[di] = -1.0
     tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
+    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
     #    import pdb;pdb.set_trace()
     tr /= data.pf.conversion_factors["GasEnergy"]
@@ -186,7 +186,7 @@
     tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     x = data.pf.conversion_factors["Temperature"]
@@ -258,9 +258,9 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     tr  = data["Ones"] #create a grid in the right size
-    if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+    if np.sum(idx)>0:
+        tr /= np.prod(tr.shape) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
         return tr
     else:
         return tr*0.0


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = np.array(grid.particle_indices)
         if field == 'particle_index':
-            return na.array(idx)
+            return np.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -168,10 +168,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -198,9 +198,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -242,20 +242,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -272,12 +272,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -294,7 +294,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -309,9 +309,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
@@ -332,8 +332,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -342,9 +342,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -354,12 +354,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -372,8 +372,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
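
_read_frecord parses Fortran unformatted records: a byte count, the payload, and the byte count repeated. A standalone round-trip of that layout (np.frombuffer on an in-memory buffer standing in for na.fromfile on a real file):

    import io
    import struct
    import numpy as np

    payload = np.arange(5, dtype='>f4')
    rec = (struct.pack('>i', payload.nbytes) + payload.tobytes()
           + struct.pack('>i', payload.nbytes))
    f = io.BytesIO(rec)
    s1 = struct.unpack('>i', f.read(4))[0]       # leading byte count
    ss = np.frombuffer(f.read(s1), dtype='>f4')  # the payload itself
    s2 = struct.unpack('>i', f.read(4))[0]       # trailing byte count
    assert s1 == s2 and np.allclose(ss, payload)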
@@ -406,14 +406,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -423,9 +423,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
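
find_root above is a bisection; a simplified standalone restatement (converging on interval width rather than on |f(c) - last|, so it is not the module's exact routine):

    import numpy as np

    def bisect(f, a, b, tol=1e-6):
        assert np.sign(f(a)) != np.sign(f(b))  # the root must be bracketed
        while b - a > tol:
            c = 0.5 * (a + b)
            if np.sign(f(c)) == np.sign(f(b)):
                b = c                          # root lies in [a, c]
            else:
                a = c                          # root lies in [c, b]
        return 0.5 * (a + b)

    root = bisect(lambda x: x**2 - 2.0, 0.0, 2.0)
    assert abs(root - np.sqrt(2.0)) < 1e-5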
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -450,14 +450,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): 
@@ -465,14 +465,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
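
b2t avoids calling the expensive a2t(b2a(...)) chain per element: it evaluates a coarse log-spaced grid of code times and hands the rest to np.interp. The same strategy, standalone, with a made-up monotone conversion in place of a2t(b2a(...)):

    import numpy as np

    def slow_convert(t):          # hypothetical stand-in for a2t(b2a(t))
        return 13.7 * (1.0 + t)   # made up, but monotone in t

    tb = -np.linspace(0.1, 1.0, 1000)                      # full array of code times
    tbs = -np.logspace(np.log10(0.1), np.log10(1.0), 100)  # coarse sample grid
    ages = np.array([slow_convert(t) for t in tbs])
    # np.interp expects ascending x-values, so reverse the descending grid
    fb2t = np.interp(tb, tbs[::-1], ages[::-1])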
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -109,7 +109,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -174,12 +174,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +187,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +273,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +296,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +311,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +347,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +367,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +384,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +405,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +424,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +439,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +473,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +620,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file
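
The _read_data_set hunk above is the heart of the FAB reader: seek past
the fields stored ahead of the requested one, read a single contiguous
block, and convert from Fortran to C axis ordering. A minimal sketch
under those assumptions (names are illustrative; the real method also
parses the FAB header to recover dtype and dimensions):

    import numpy as np

    def read_fab_field(fobj, data_offset, nelements, field_index, dims,
                       dtype=np.float64):
        itemsize = np.dtype(dtype).itemsize
        # Fields are stored back to back; skip the ones before ours.
        fobj.seek(data_offset + nelements * itemsize * field_index)
        data = np.fromfile(fobj, count=nelements, dtype=dtype)
        # BoxLib writes x fastest: reshape with reversed dims, then swap
        # axes 0 and 2 to recover the (nx, ny, nz) layout yt expects.
        return data.reshape(dims[::-1]).swapaxes(0, 2)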


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,10 +81,10 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -97,7 +97,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -137,18 +137,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
@@ -182,8 +182,8 @@
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,9 +193,9 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = na.array(self.grids, dtype='object')
+#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -212,7 +212,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -316,21 +316,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod
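
The particle-counting hunk above keeps the np.choose/np.greater idiom
copied from object_finding_mixin.py: each axis comparison zeroes out the
grids whose edges do not bracket the particle. The same containment test
reads more directly as a boolean mask; a sketch, assuming coord is a
length-3 position, left_edges and right_edges are (N, 3) arrays, and
levels is (N,):

    import numpy as np

    def finest_grid_containing(coord, left_edges, right_edges, levels):
        # A grid contains the particle when left <= coord < right on
        # every axis; Orion keeps particles on the finest level, so take
        # the deepest of the matching grids.
        inside = np.all((left_edges <= coord) & (coord < right_edges),
                        axis=1)
        candidates = np.where(inside)[0]
        if candidates.size == 0:
            return None
        return candidates[np.argmax(levels[candidates])]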


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -131,7 +131,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
@@ -90,7 +90,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +179,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(AMRHierarchy):
@@ -313,7 +313,7 @@
                     self.__pointer_handler(vv)
         pbar.finish()
         self._fill_arrays(ei, si, LE, RE, np)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
@@ -322,7 +322,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, np):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
@@ -379,7 +379,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -408,9 +408,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -425,7 +425,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -477,20 +477,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -518,7 +518,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -527,7 +527,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -590,7 +590,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -601,7 +601,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -638,18 +638,18 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
     def _fill_arrays(self, ei, si, LE, RE, np):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
@@ -662,7 +662,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, np):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
@@ -724,17 +724,17 @@
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            np.concatenate([self["DomainLeftEdge"], [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            np.concatenate([self["DomainRightEdge"], [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -827,7 +827,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
@@ -842,17 +842,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -937,7 +937,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -949,8 +949,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -990,7 +990,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -998,7 +998,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():
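
_generate_random_grids above leans on a NumPy detail that is easy to
misread: an imaginary slice step tells np.mgrid to return that many
evenly spaced points, endpoints included, rather than stepping by a
fixed stride. A two-line illustration:

    import numpy as np

    np.mgrid[0:99:20j]                   # 20 evenly spaced floats, 0..99
    np.mgrid[0:99:20j].astype("int32")   # the grid indices actually used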


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -193,7 +193,7 @@
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -249,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -324,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
@@ -367,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -406,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -466,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -485,7 +485,7 @@
 def _Bmag(field, data):
     """ magnitude of bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
 add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
 
@@ -495,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -555,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -584,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -606,7 +606,7 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
@@ -638,7 +638,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
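
The _spdensity, _dmpdensity and CIC particle-field hunks above all end
the same way: "top" accumulates value-times-mass deposits, "bottom"
accumulates mass, and the quotient must leave cells without particles at
zero rather than dividing by zero. That normalization step in isolation,
as a sketch:

    import numpy as np

    def normalize_deposit(top, bottom):
        # Mass-weighted average per cell; cells with no deposited mass
        # stay zero instead of producing 0/0.
        out = np.zeros_like(top)
        nz = bottom.nonzero()
        out[nz] = top[nz] / bottom[nz]
        return out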


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['Refineby'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.funcs import *
@@ -70,7 +70,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -123,14 +123,14 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
@@ -139,20 +139,20 @@
         rdx = (self.parameter_file.domain_right_edge -
                 self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.zeros((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
             dxs[i] = rdx/self.parameter_file.refine_by**i
        
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
+            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
@@ -363,9 +363,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -391,7 +391,7 @@
         nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:
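
The particle-index hunk in the FLASH hierarchy above is a prefix sum:
given the per-grid particle counts from /localnp, grid i owns the
half-open slice [indices[i], indices[i+1]) of the flat particle arrays.
The bookkeeping in isolation, with hypothetical counts:

    import numpy as np

    counts = np.array([3, 0, 5, 2], dtype="int64")   # per-grid localnp
    indices = np.zeros(counts.size + 1, dtype="int64")
    np.add.accumulate(counts, out=indices[1:])
    # indices -> [0, 3, 3, 8, 10]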


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -66,12 +66,12 @@
             gi = f["/tracer particles"][start:end,blki] == bi
             tr.append(f["/tracer particles"][gi,fi])
             start = end
-        return na.concatenate(tr)
+        return np.concatenate(tr)
 
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
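
The FLASH particle reader above filters the flat /tracer particles table
by block: a boolean comparison on the block-id column selects the rows
belonging to one grid, and the per-block pieces are then joined with
np.concatenate. The row-selection idiom, reduced to its essentials:

    import numpy as np

    table = np.array([[0, 1.5], [1, 2.5], [0, 3.5]])  # (block, value)
    rows = table[:, 0] == 0     # rows owned by block 0
    table[rows, 1]              # -> array([ 1.5,  3.5])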


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(len(args), dtype='object')
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
         


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')
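
The Gadget particle read above gathers one array per particle type and
joins them; the len(data) > 0 guard matters because np.concatenate
raises a ValueError on an empty sequence. The pattern in isolation, with
hypothetical inputs:

    import numpy as np

    pieces = [np.arange(3), np.arange(2)]   # per-type arrays from HDF5
    out = np.concatenate(pieces) if pieces else np.array([])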


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -71,7 +71,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +108,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +125,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -147,7 +147,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
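
The active_dims computation above flags degenerate axes: a dimension is
inactive when every grid is one cell thick along it and the domain
itself is only one cell wide there, so refinement must not shrink dx on
that axis. With hypothetical grid dimensions:

    import numpy as np

    gdims = np.array([[8, 8, 1], [16, 16, 1]])   # per-grid dimensions
    domain_dimensions = np.array([8, 8, 1])
    active = ~((np.max(gdims, axis=0) == 1) & (domain_dimensions == 1))
    # active -> [ True,  True, False]; dx[2] stays fixed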


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -110,7 +110,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -170,9 +170,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in, in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +181,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +259,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +304,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +325,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +354,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +367,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +381,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +494,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0
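
For reference, __calculate_grid_dimensions above turns inclusive index
ranges into cell counts; a minimal sketch with invented header tokens.
(Note that np.array(map(int, ...)) relies on Python 2's list-returning
map; a list comprehension is the version-agnostic spelling.)

    import numpy as np

    start_stop = ('0,0,0', '31,31,15')      # hypothetical header tokens
    start = np.array([int(v) for v in start_stop[0].split(',')])
    stop = np.array([int(v) for v in start_stop[1].split(',')])
    dimension = stop - start + 1            # inclusive range -> cell counts
    # dimension -> array([32, 32, 16])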


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file
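
For reference, this reader reshapes with ActiveDimensions[::-1] plus a swap
of axes 0 and 2, while the nyx and orion readers below use order='F'; for a
3-D block the two spellings give the same Fortran-order interpretation:

    import numpy as np

    dims = (2, 3, 4)
    raw = np.arange(np.prod(dims), dtype='float64')
    a = raw.reshape(dims[::-1]).swapaxes(0, 2)   # maestro-style
    b = raw.reshape(dims, order='F')             # nyx/orion-style
    assert (a == b).all()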


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -108,7 +108,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -172,20 +172,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in, in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +269,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +290,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +304,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +341,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +361,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +378,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +398,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +415,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +444,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +464,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +607,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
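
For reference, read_particle_header above streams whitespace-separated
integers straight into a typed array via np.fromiter with an explicit
count; the same pattern on invented data:

    import numpy as np

    lines = ["1 0 64\n", "2 64 128\n"]      # stand-in header lines
    num_grids = len(lines)
    grid_info = np.fromiter((int(i) for line in lines for i in line.split()),
                            dtype='int64',
                            count=3 * num_grids).reshape((num_grids, 3))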
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -107,7 +107,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -139,7 +139,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +160,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +211,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in, in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +302,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +347,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +368,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +399,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +413,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +551,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
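
For reference, the np.choose sequence in the particle-counting pass above
zeroes out every grid whose edges do not bracket the particle; a boolean
formulation of the same LeftEdge <= coord < RightEdge test, on toy edges:

    import numpy as np

    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = np.array([0.75, 0.6, 0.9])
    mask = np.all((grid_left_edge <= coord) & (coord < grid_right_edge),
                  axis=1)
    ind = np.where(mask)[0]                 # -> array([1])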


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 
@@ -79,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -93,10 +93,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -116,7 +116,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.tree_proxy = pf.ramses_tree
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -153,12 +153,12 @@
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         level_info = self.tree_proxy.count_zones()
         num_ogrids = sum(level_info)
-        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        ogrid_hilbert_indices = na.zeros(num_ogrids, dtype='uint64')
-        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        ogrid_left_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = np.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = np.zeros((num_ogrids,6), dtype='int64')
+        ogrid_hilbert_indices = np.zeros(num_ogrids, dtype='uint64')
+        ochild_masks = np.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
             self.pf.domain_dimensions,
             ogrid_left_edge, ogrid_right_edge,
@@ -180,7 +180,7 @@
             if level_info[level] == 0: continue
             # Get the indices of grids on this level
             ggi = (ogrid_levels == level).ravel()
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2 
+            dims = np.ones((ggi.sum(), 3), dtype='int64') * 2 
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             fl = ogrid_file_locations[ggi,:]
@@ -189,7 +189,7 @@
             # We want grids that cover no more than MAX_EDGE cells in every direction
             psgs = []
             # left_index is integers of the index, with respect to this level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
+            left_index = np.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
@@ -203,18 +203,18 @@
             #print level, hilbert_indices.min(), hilbert_indices.max()
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
             for ddleft_index, ddfl in zip(lefts, locs):
-                for idomain in na.unique(ddfl[:,0]):
+                for idomain in np.unique(ddfl[:,0]):
                     dom_ind = ddfl[:,0] == idomain
                     dleft_index = ddleft_index[dom_ind,:]
                     dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                     psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                     dleft_index, dfl)
                     if psg.efficiency <= 0: continue
@@ -226,12 +226,12 @@
             pbar.finish()
             self.proto_grids.append(psgs)
             print sum(len(psg.grid_file_locations) for psg in psgs)
-            sums = na.zeros(3, dtype='int64')
+            sums = np.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             #for g in self.proto_grids[level]:
             #    sums += [s.sum() for s in g.sigs]
-            #assert(na.all(sums == dims.prod(axis=1).sum()))
+            #assert(np.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     def _parse_hierarchy(self):
@@ -251,11 +251,11 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.proto_grids = []
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[gi,:],
@@ -346,10 +346,10 @@
         rheader = self.ramses_tree.get_file_info()
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_right_edge = na.ones(3, dtype='float64') \
+        self.domain_right_edge = np.ones(3, dtype='float64') \
                                            * rheader['boxlen']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * 2
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")
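
For reference, the regridding loop above repeatedly selects the rows that
share a key (Hilbert index, then domain) and shrink-wraps their integer
extents; the core grouping pattern on invented data, keeping the same +2
oct padding:

    import numpy as np

    hilbert_indices = np.array([7, 3, 7, 7, 3], dtype='uint64')
    left_index = np.arange(15, dtype='int64').reshape((5, 3))
    for u in np.unique(hilbert_indices):
        members = hilbert_indices == u
        initial_left = np.min(left_index[members], axis=0)
        idims = (np.max(left_index[members], axis=0) - initial_left).ravel() + 2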


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,8 +38,8 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
 
     def _read_data_set(self, grid, field):
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -71,7 +71,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -180,7 +180,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -191,7 +191,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -199,7 +199,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -208,7 +208,7 @@
 
     def _initialize_grid_arrays(self):
         AMRHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def save_data(self, *args, **kwargs):
         pass
@@ -224,7 +224,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -325,23 +325,23 @@
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = np.random.random((256, 256, 256))
     >>> data = dict(Density = arr)
     >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
                 
     """
     sfh = StreamDictFieldHandler()
     sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
+    domain_dimensions = np.array(domain_dimensions)
+    if np.unique(domain_dimensions).size != 1:
         print "We don't support variably sized domains yet."
         raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+    domain_left_edge = np.zeros(3, 'float64')
+    domain_right_edge = np.ones(3, 'float64')
+    grid_left_edges = np.zeros(3, "int64").reshape((1,3))
+    grid_right_edges = np.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
+    grid_levels = np.array([0], dtype='int32').reshape((1,1))
     grid_dimensions = grid_right_edges - grid_left_edges
 
     grid_left_edges  = grid_left_edges.astype("float64")
@@ -359,9 +359,9 @@
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        np.array([-1], dtype='int64'),
+        number_of_particles*np.ones(1, dtype='int64').reshape((1,1)),
+        np.zeros(1).reshape((1,1)),
         sfh,
     )
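
One detail worth keeping in mind from _reconstruct_parent_child above:
np.where on a boolean mask returns a tuple of index arrays, hence the
ids[0] indexing ("where is a tuple"):

    import numpy as np

    mask = np.array([1, 0, 1, 1], dtype='int32')
    ids = np.where(mask.astype("bool"))
    children_ids = ids[0]                   # -> array([0, 2, 3])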
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1
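
For reference, the np.mgrid calls above use the complex-step convention:
an imaginary step N*1j requests N samples inclusive of both endpoints,
rather than acting as a stride:

    import numpy as np

    glx, gly, glz = np.mgrid[0.0:0.75:4j, 0.0:0.75:4j, 0.0:0.75:4j]
    # each array has shape (4, 4, 4); samples run 0.0, 0.25, 0.5, 0.75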


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):
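
For reference, the viewer fills its vertex buffers by pushing a generator
through np.fromiter with a precomputed count, so values land directly in a
float32 array; a stripped-down sketch with an invented quad:

    import numpy as np

    def corners():
        # yield each component of four made-up quad vertices
        for v in ((0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)):
            for c in v:
                yield c

    v = np.fromiter(corners(), dtype='float32', count=4 * 3)
    # v.shape == (12,), ready to wrap in vbo.VBO(v)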


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)
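
For reference, the binary-payload branch above serializes any ndarray with
tostring(), which returns the raw bytes of the array buffer (later NumPy
releases rename it tobytes()); in miniature:

    import numpy as np

    bdata = np.arange(4, dtype='float64')
    raw = bdata.tostring()                  # raw buffer bytes
    # len(raw) == 4 * 8 for four float64 values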


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrices
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     

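The camera-path code above resamples keyframe centers and up-vectors component by component onto N evenly spaced times. A hedged sketch of the same resampling, with np.interp standing in for yt's create_spline (note the argument order differs: np.interp takes the new abscissae first, create_spline takes them last):

    import numpy as np

    N = 5
    times = np.linspace(0.0, 1.0, 3)        # 3 hypothetical keyframes
    centers = np.array([[0., 0., 0.],
                        [1., 2., 0.],
                        [2., 0., 1.]])

    pos = np.empty((N, 3), dtype="float64")
    for i in range(3):
        pos[:, i] = np.interp(np.linspace(0.0, 1.0, N), times, centers[:, i])
    # pos now holds N camera centers along the piecewise-linear path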

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \

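The yt/mods.py hunk is the compatibility shim for this whole series: both aliases bind the same module object, so scripts written against either name keep working while the migration proceeds. Concretely:

    import numpy as na  # For historical reasons
    import numpy as np  # For modern purposes

    assert na is np     # one module, two names; na.seterr and np.seterr
                        # are the very same function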

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is omitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total volume of the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,12 +994,12 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
         pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+                np.prod(self.domain_right_edge-self.domain_left_edge))
 
         while current_node is not None:
             pbar.update(volume_partitioned)
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])

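Two idioms in amr_kdtree.py deserve a note: node ids follow the implicit binary-heap numbering defined by _lchild_id/_rchild_id/_parent_id, and reduce_tree_images routes each MPI rank down the tree using the bits of np.binary_repr(anprocs + rank), discarding the leading bit. A toy illustration (the ids and rank are hypothetical):

    import numpy as np

    def _lchild_id(id): return (id << 1) + 1
    def _rchild_id(id): return (id << 1) + 2
    def _parent_id(id): return (id - 1) >> 1

    assert _parent_id(_lchild_id(5)) == 5 and _parent_id(_rchild_id(5)) == 5

    # With comm.size = 4, anprocs = 4; rank 2 gets path '110': drop the
    # leading '1', then descend right (1), then left (0) from the root.
    assert np.binary_repr(4 + 2) == '110'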

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 

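The ray test above stays reproducible by seeding NumPy's global RNG before drawing its endpoints; the unit-cube samples are then rescaled into the domain box. A standalone version with a hypothetical domain:

    import numpy as np

    np.random.seed(4333)
    left = np.zeros(3)
    right = np.ones(3) * 64.0            # hypothetical domain edges
    start_point = np.random.random(3) * (right - left) + left
    end_point   = np.random.random(3) * (right - left) + left
    assert np.all(start_point >= left) and np.all(start_point < right)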

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -55,10 +55,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +122,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +134,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True

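compare_array_delta above uses a symmetric relative difference, |a1 - a2| / (a1 + a2), reduced with np.nanmax so NaNs (e.g. 0/0 where both arrays vanish) do not trip the comparison. For example:

    import numpy as np

    a1 = np.array([1.0, 2.0, 4.0])
    a2 = np.array([1.0, 2.1, 4.0])
    delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
    assert np.nanmax(delta) < 0.05       # passes at a 5% tolerance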

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1212,7 +1212,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1266,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1302,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     np.sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) * 
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsin(np.fabs(self.OmegaCurvatureNow) * 
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0.
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   

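The ComputeRedshiftFromTime branch for an open, Lambda-free universe solves eta - sinh(eta) + x = 0 (Peebles 1993, eq. 13-10) by rewriting it as the contraction eta = arcsinh(eta + x), seeded by the two power-law approximations visible above. A self-contained sketch of that solver:

    import numpy as np

    def solve_eta(x, tol=1.0e-10, maxiter=40):
        eta = np.power(6 * x, 1.0 / 3.0)                       # part 1
        eta = np.power(120 * x / (20 + eta * eta), 1.0 / 3.0)  # part 2
        for _ in range(maxiter):                               # part 3
            eta_old = eta
            eta = np.arcsinh(eta + x)
            if np.fabs(eta - eta_old) < tol:
                break
        return eta

    eta = solve_eta(2.5)
    assert abs(eta - np.sinh(eta) + 2.5) < 1.0e-8   # residual ~ 0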

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] is 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] is 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here

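A note on the VECTORS records handled above: components arrive interleaved as x,y,z,x,y,z,..., so stride-3 slices recover each component, which is then reshaped in Fortran order to match the grid layout. A hedged sketch with synthetic data standing in for the .vtk stream:

    import numpy as np

    grid_dims = (2, 3, 4)
    ncells = 2 * 3 * 4
    # stand-in for np.fromfile(f, dtype='>f4', count=3*ncells)
    data = np.arange(3 * ncells, dtype='>f4')

    data_x = data[0::3].reshape(grid_dims, order='F')
    data_y = data[1::3].reshape(grid_dims, order='F')
    data_z = data[2::3].reshape(grid_dims, order='F')
    assert data_x[0, 0, 0] == 0.0 and data_y[0, 0, 0] == 1.0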

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,23 +35,23 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
@@ -61,28 +61,28 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
@@ -94,9 +94,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,23 +104,23 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
@@ -135,11 +135,11 @@
         xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
         ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
         zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
+        if np.any(np.isnan(self.table)):
             raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
+        if np.any(np.isnan(x) | np.isnan(y) | np.isnan(z)):
             raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
+        if np.any(np.isnan(xm) | np.isnan(ym) | np.isnan(zm)):
             raise ValueError
         my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
         my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)


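The truncate branch in these interpolator classes clamps out-of-range bin
indices instead of raising.  A standalone sketch of that clamped np.digitize
pattern (illustrative only, not part of the changeset; all names invented):

    import numpy as np

    x_bins = np.linspace(0.0, 1.0, 5)           # 5 edges -> 4 valid intervals
    x_vals = np.array([-0.2, 0.1, 0.5, 1.3])    # first and last fall off the table
    x_i = (np.digitize(x_vals, x_bins) - 1).astype('int32')
    # With truncate=True, clamp to [0, nbins - 2] so every value maps to a
    # usable interval instead of raising ValueError:
    x_i = np.minimum(np.maximum(x_i, 0), len(x_bins) - 2)
    print(x_i)  # [0 0 2 3]
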
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: raise RuntimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] != 3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = P.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unfortunately, I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356,  0.        ,  0.        ])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unfortunately, I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,20 +656,20 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     


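periodic_dist above reduces, per axis, to taking the shorter of the direct and
wrap-around separations before the Euclidean norm.  A condensed sketch of the
same computation (illustrative only; periodic_dist_sketch is an invented name):

    import numpy as np

    def periodic_dist_sketch(a, b, period):
        # Per axis: the shorter of |a - b| and period - |a - b|.
        d = np.abs(np.asarray(a, dtype='float64') - np.asarray(b, dtype='float64'))
        d = np.minimum(d, period - d)
        return np.sqrt((d ** 2).sum())

    print(periodic_dist_sketch([0.1, 0.1, 0.1], [0.9, 0.9, 0.9], 1.0))  # ~0.3464
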
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
@@ -97,10 +97,10 @@
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +129,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)


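The upload path in minimal_representation.py has to coerce numpy types before
JSON encoding, since json handles neither ndarrays nor numpy scalars.  That
coercion in isolation (illustrative sketch; the metadata keys are invented):

    import json
    import numpy as np

    metadata = {'left_edge': np.zeros(3), 'current_time': np.float64(1.5)}
    for k, v in metadata.items():
        if isinstance(v, np.ndarray):
            metadata[k] = v.tolist()        # arrays -> plain lists
        elif hasattr(v, 'dtype'):
            metadata[k] = np.asscalar(v)    # numpy scalars -> Python scalars
    print(json.dumps(metadata))
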
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,7 +52,7 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
@@ -60,23 +60,23 @@
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
         self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.


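When no north vector is supplied, _setup_normalized_vectors picks a reference
axis via the cross-product heuristic above and builds an orthonormal (east,
north, normal) triad from it.  Roughly (illustrative sketch, with an arbitrary
normal vector):

    import numpy as np

    normal = np.array([0.1, 0.2, 0.97], dtype='float64')
    normal /= np.sqrt(np.dot(normal, normal))
    vecs = np.identity(3)
    t = np.cross(normal, vecs).sum(axis=1)      # heuristic axis score
    ax = t.argmax()
    east = np.cross(vecs[ax, :], normal).ravel()
    north = np.cross(normal, east).ravel()
    north /= np.sqrt(np.dot(north, north))
    east /= np.sqrt(np.dot(east, east))
    # (east, north, normal) now form an orthonormal triad.
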
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -73,7 +73,7 @@
             for g in self.grids:
                 for f in fields:
                     if f not in self.queue[g.id]:
-                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
                         self.queue[g.id][f] = d
                 for f in pfields:
                     self.queue[g.id][f] = self._read(g, f)
@@ -87,12 +87,12 @@
         fi = self.pf.field_info[f]
         if fi.particle_type and g.NumberOfParticles == 0:
             # because this gets upcast to float
-            return na.array([],dtype='float64')
+            return np.array([],dtype='float64')
         try:
             temp = self.pf.h.io._read_data_set(g, f)
         except: # self.pf.hierarchy.io._read_exception as exc:
             if fi.not_in_all:
-                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
             else:
                 raise
         return temp
@@ -137,9 +137,9 @@
         msg = dict(grid_id = grid.id, field = field, op="read")
         mylog.debug("Requesting %s for %s from %s", field, grid, dest)
         if self.pf.field_info[field].particle_type:
-            data = na.empty(grid.NumberOfParticles, 'float64')
+            data = np.empty(grid.NumberOfParticles, 'float64')
         else:
-            data = na.empty(grid.ActiveDimensions, 'float64')
+            data = np.empty(grid.ActiveDimensions, 'float64')
         hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
         self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
         mylog.debug("Waiting for data.")


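The remote-read path in io_runner.py posts a non-blocking receive before
sending the request, so the reply cannot be missed.  A two-rank mpi4py sketch
of that handshake (illustrative only; run under mpirun -np 2):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    TAG = 317
    if comm.rank == 0:
        data = np.empty(4, 'float64')
        hook = comm.Irecv([data, MPI.DOUBLE], source=1)  # post receive first
        comm.send({'op': 'read'}, dest=1, tag=TAG)       # then request the data
        hook.Wait()
        print(data)
    elif comm.rank == 1:
        msg = comm.recv(source=0, tag=TAG)               # serve the request
        comm.Send([np.arange(4, dtype='float64'), MPI.DOUBLE], dest=0)
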
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -421,14 +421,14 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-    obj_ids = na.arange(len(objects))
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
@@ -525,14 +525,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
             datatype == "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
             datatype == "array"
         elif isinstance(data, types.ListType):
             datatype == "list"
@@ -549,14 +549,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -581,16 +581,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -608,7 +608,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -616,7 +616,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -636,7 +636,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -743,7 +743,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -751,11 +751,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -775,8 +775,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -802,9 +802,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -816,7 +816,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -830,7 +830,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -841,11 +841,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -867,7 +867,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -926,13 +926,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -943,7 +943,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -973,13 +973,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1000,13 +1000,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \


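parallel_objects carves the communicator into njobs subgroups with
np.array_split, and each rank scans the pieces for its own id.  The arithmetic
in isolation (illustrative sketch):

    import numpy as np

    my_size, njobs, my_rank = 8, 3, 5
    all_new_comms = np.array_split(np.arange(my_size), njobs)
    # -> [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
    for i, comm_set in enumerate(all_new_comms):
        if my_rank in comm_set:
            my_new_id = i
            break
    print(my_new_id)  # rank 5 lands in subgroup 1
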
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i


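The task queue uses the same split but reserves rank 0 as the dispatcher:
workers are carved out of ranks 1..N-1 and a singleton group [0] is prepended.
In isolation (illustrative sketch):

    import numpy as np

    my_size, njobs = 8, 3
    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
    all_new_comms.insert(0, np.array([0]))   # group 0 is the queue server
    print(all_new_comms)
    # [array([0]), array([1, 2, 3]), array([4, 5]), array([6, 7])]
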
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]


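Each colormap in color_maps.py is registered as a matplotlib segmentdata dict
whose channels are (x, y0, y1) triples built with zip.  A toy ramp registered
the same way (illustrative sketch; the channel ramps are invented):

    import numpy as np
    import matplotlib.colors as cc

    _vs = np.linspace(0, 1, 255)
    r, g, b = _vs, _vs ** 0.5, 1.0 - _vs     # any monotone ramps will do
    cdict = {'red':   list(zip(_vs, r, r)),
             'green': list(zip(_vs, g, g)),
             'blue':  list(zip(_vs, b, b))}
    toy_cmap = cc.LinearSegmentedColormap('toy', cdict, 256)
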
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata is None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata is None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,7 +29,7 @@
     y_dict, \
     axis_names
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -352,7 +352,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -374,7 +374,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer
@@ -163,7 +163,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -283,7 +283,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -297,13 +297,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()


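TransportAppender log-scales the buffer, normalizes it while masking out the
-inf entries that log10 produces for zeros, and byte-clips the result for PNG
output.  That pipeline as one small function (illustrative sketch;
byte_scale_log is an invented name):

    import numpy as np

    def byte_scale_log(buff):
        image = np.log10(buff)
        mi = np.nanmin(image[~np.isinf(image)])   # ignore log10(0) == -inf
        ma = np.nanmax(image[~np.isinf(image)])
        image = (image - mi) / (ma - mi)
        return np.clip(image * 255, 0, 255).astype('uint8')
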
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, min=0, max=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,22 +97,22 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
@@ -141,16 +141,16 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
         for channel in range(bitmap_array.shape[2]):
             bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
@@ -229,14 +229,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +279,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +295,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",


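map_to_colors works by linearly interpolating the normalized buffer through
each channel of a 256-entry lookup table.  The core of it (illustrative
sketch; the grayscale LUT is a stand-in for cmd.color_map_luts):

    import numpy as np

    lut = [np.linspace(0, 1, 256)] * 3     # stand-in grayscale LUT channels
    buff = np.random.random((4, 4))        # buffer already scaled to [0, 1]
    x = np.mgrid[0.0:1.0:lut[0].shape[0] * 1j]
    mapped = np.dstack([(np.interp(buff, x, v) * 255) for v in lut]).astype('uint8')
    print(mapped.shape)  # (4, 4, 3)
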
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1878,7 +1878,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +52,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -195,10 +195,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -250,12 +250,12 @@
         # appropriate shift to the copied field.

         # set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +269,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -322,9 +322,9 @@
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
@@ -337,7 +337,7 @@
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
             if visible.nonzero()[0].size == 0: continue
-            verts = na.array(
+            verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
@@ -411,18 +411,18 @@
                              plot.data[self.field_y],
                              int(nx), int(ny),
                            (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
                       self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
         lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        mag = np.sqrt(pixX**2 + pixY**2)
+        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
         dt = 1.0 / (self.nsample-1)
         for i in range(1,self.nsample):
             xt = lines[i-1,0,:,:]
             yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
+            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
         for i in range(self.data_size[0]):
@@ -486,18 +486,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -604,7 +604,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -619,8 +619,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
+        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -656,7 +656,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -670,7 +670,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -814,7 +814,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1062,8 +1062,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)


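The hunks above and below are one mechanical sweep: the legacy `na` alias for numpy becomes the community-standard `np` throughout the visualization modules. The changeset does not show how the rename was driven; a minimal sketch of how such a sweep could be scripted, assuming a word-boundary regex is enough to keep names like `nan` intact:

    import re
    import sys

    # Rewrite the numpy alias: "na.foo" -> "np.foo" and
    # "import numpy as na" -> "import numpy as np".  The word
    # boundary keeps identifiers such as "nan" or "sina" intact.
    pattern = re.compile(r"\bna\b(?=\.)|(?<=import numpy as )na\b")

    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, "w") as f:
            f.write(pattern.sub("np", src))

Invoked as, e.g., `python rename_alias.py yt/visualization/*.py` (the script name is hypothetical).
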
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)


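The plot_types.py hunks repeatedly pair np.nanmin/np.nanmax with a positivity mask so that log-scaled color limits ignore zeros, negatives, and NaNs alike. A toy buffer shows the pattern in isolation (values are made up):

    import numpy as np

    buff = np.array([[0.0, 1e-3, np.nan],
                     [1e2, 0.0,  1e-1]])

    # For a log field, limits must come from positive, finite
    # entries only; NaN > 0 is False, so the mask drops NaNs too.
    bI = np.where(buff > 0)
    newmin = np.nanmin(buff[bI])   # 1e-3
    newmax = np.nanmax(buff[bI])   # 1e2
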
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -31,7 +31,7 @@
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
@@ -120,7 +120,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -162,7 +162,7 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -172,11 +172,11 @@
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
     # Transforming to the cutting plane coordinate system
-    center = na.array(center)
+    center = np.array(center)
     center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
     (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
+    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+    center = np.dot(mat,center)
     width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
@@ -1072,7 +1072,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1116,12 +1116,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1140,8 +1140,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1165,7 +1165,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)


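plot_window.py's contour and quiver helpers build their sample grids with numpy's complex-step slicing: an imaginary step like nx*1j tells mgrid to return that many points with both endpoints included, rather than stepping by a fixed spacing. In isolation:

    import numpy as np

    # 5j means "5 points from 0 to 1 inclusive", not a spacing.
    xi, yi = np.mgrid[0.0:1.0:5j, 0.0:2.0:3j]
    # xi.shape == yi.shape == (5, 3)
    # xi[:, 0] -> [0.0, 0.25, 0.5, 0.75, 1.0]
    # yi[0, :] -> [0.0, 1.0, 2.0]
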
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.logspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.logspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -338,11 +338,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -369,7 +369,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -378,7 +378,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)


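np.logspace samples exponents of its base, which is why the bin-edge code above first takes log10 of the bounds on log-scaled axes. A quick check of the convention:

    import numpy as np

    xmi, xma = 1e-2, 1e2
    # Pass exponents, get back edges spanning [xmi, xma].
    x_bins = np.logspace(np.log10(xmi), np.log10(xma), 5)
    # -> [1e-02, 1e-01, 1e+00, 1e+01, 1e+02]
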
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_passthrough
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp


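The streamline update above is a forward-Euler march: sample the velocity at the clamped integer pixel index of the previous position, then advance by dt times that sample. One step of the same update on a stand-in velocity field, with np.clip playing the role of the np.maximum/np.minimum pair:

    import numpy as np

    nx = ny = 16
    pixX = np.ones((nx, ny))           # stand-in x-velocity samples
    pixY = 0.5 * np.ones((nx, ny))     # stand-in y-velocity samples

    pos = np.array([2.3, 7.8])         # current streamline position
    dt = 0.1
    # Clamp the integer sample index into the grid, then advance.
    ix, iy = np.clip(pos.astype('int'), 0, [nx - 1, ny - 1])
    pos = pos + dt * np.array([pixX[ix, iy], pixY[ix, iy]])
    # pos is now [2.4, 7.85]
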
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs == 1) and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs


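The LogLocator above works entirely in exponent space: floor and ceil the log-space view limits to whole decades, widen the stride until the tick count fits, and exponentiate. The core computation, for base 10 and stride 1:

    import math
    import numpy as np

    b = 10.0
    vmin, vmax = np.log10(3e-2), np.log10(4e3)  # log-space view limits
    decades = np.arange(math.floor(vmin), math.ceil(vmax) + 1)
    ticklocs = b ** decades   # 1e-2, 1e-1, ..., 1e4
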
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')


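The transfer function assembled above stacks Gaussian bumps over log10(density); mh shifts the bounds by log10 of the proton mass (1.67e-24 g). A sketch of one channel bump, assuming add_gaussian's kernel has the form height * exp(-(x - center)**2 / width):

    import numpy as np

    mh = np.log10(1.67e-24)              # log10(proton mass in g)
    x = np.linspace(7.5 + mh, 14.0 + mh, 256)
    center, width, height = 8.25 + mh, 0.002, 0.1
    # One channel of the transfer function, under the assumed
    # kernel height * exp(-(x - center)**2 / width).
    channel = height * np.exp(-(x - center) ** 2 / width)
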
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]


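The loop closing UBVRI.py tags each Johnson filter with a characteristic wavelength: the sample at peak transmission. The same one-liner on a made-up filter curve:

    import numpy as np

    wavelen = np.array([4000., 4400., 4800.])
    trans = np.array([0.2, 1.0, 0.5])
    Lchar = wavelen[np.argmax(trans)]    # 4400.0
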
diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -167,12 +167,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -226,10 +226,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -289,23 +289,23 @@
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
     def get_sampler(self, args):
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
@@ -326,13 +326,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -510,30 +510,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +559,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +568,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +583,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
         north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +613,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -676,12 +676,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,14 +693,14 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -708,7 +708,7 @@
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -726,7 +726,7 @@
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
@@ -747,20 +747,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +771,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -823,14 +823,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +852,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +880,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +896,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -935,10 +935,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -957,7 +957,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -968,13 +968,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -988,13 +988,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1088,7 +1088,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1106,7 +1106,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1164,18 +1164,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1195,17 +1195,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1228,9 +1228,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1243,7 +1243,7 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
@@ -1269,7 +1269,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1312,7 +1312,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1322,9 +1322,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1349,7 +1349,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1381,10 +1381,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1426,7 +1426,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1445,7 +1445,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1457,8 +1457,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1466,14 +1466,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1502,15 +1502,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1568,12 +1568,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1607,8 +1607,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1630,7 +1630,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1722,7 +1722,7 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z != None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times == None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time == None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])
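
The minimum/maximum calls above clamp out-of-range queries to the keyframe
interval, so create_spline never extrapolates.  A hypothetical call (the
values are illustrative only):

    import numpy as np
    from yt.visualization.volume_rendering.create_spline import create_spline

    old_x = np.linspace(0.0, 4.0, 5)     # sorted keyframe positions
    old_y = old_x**2                     # keyframe values
    new_x = np.array([-1.0, 2.5, 5.0])   # end queries fall outside the data
    y = create_spline(old_x, old_y, new_x)
    # -1.0 and 5.0 are clamped to old_x[0] and old_x[-1] before interpolation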


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 


diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal
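
The sweep above is purely mechanical: every call through the old na alias
becomes the same call through np.  A minimal before/after sketch (the array
shapes here are arbitrary, not taken from the code above):

    import numpy as np   # previously: import numpy as na

    uv = np.ones(3, dtype='float64')                # was: na.ones(...)
    image = np.zeros((4, 4, 3), dtype='float64')    # was: na.zeros(...)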



https://bitbucket.org/yt_analysis/yt-3.0/changeset/376f17175659/
changeset:   376f17175659
branch:      yt
user:        MatthewTurk
date:        2012-08-31 20:03:34
summary:     Renaming the local variable np, which held particle counts, to npart,
to avoid shadowing the new numpy alias np, which used to be na.
affected #:  1 file

diff -r 8e9b059aacc17bc8b095a9725b647ea91d5e691b -r 376f17175659a4ee0f033081ca677d4e00937c58 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -291,7 +291,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
@@ -304,15 +304,15 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np)
+        self._fill_arrays(ei, si, LE, RE, npart)
         temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
@@ -320,13 +320,13 @@
         self._store_binary_hierarchy()
         t2 = time.time()
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions.flat[:] = ei
         self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
 
     def __pointer_handler(self, m):
         sgi = int(m[2])-1
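
The rename matters because, after the alias change above, the module-level
np now refers to numpy, and a local list named np would shadow it (as the
call to np.empty a few lines later shows).  A minimal sketch of the
conflict, with a toy counts list standing in for the parsed hierarchy
values:

    import numpy as np

    def fill(counts):
        npart = []                # formerly named "np", shadowing the alias
        for c in counts:
            npart.append(int(c))
        # with the old name, np here would be the local list and
        # np.array would raise AttributeError
        return np.array(npart, dtype='int64')

    fill([3, 0, 12])              # -> array([ 3,  0, 12])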



https://bitbucket.org/yt_analysis/yt-3.0/changeset/61f8e76d0470/
changeset:   61f8e76d0470
branch:      yt
user:        sskory
date:        2012-08-30 21:47:33
summary:     Adding a line to automatically build the Fortran kD-tree during install.
affected #:  1 file

diff -r 9c119e2bbb44180dfe7701c974dcb5f3b66d297e -r 61f8e76d0470183e8ae4b2dcee1ff9d05b599d03 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -683,6 +683,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b768f6d5c775/
changeset:   b768f6d5c775
branch:      yt
user:        MatthewTurk
date:        2012-08-31 23:51:45
summary:     Merged in sskory/yt (pull request #254)
affected #:  1 file

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r b768f6d5c7756bd548c02011354f86675ffb21c1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -683,6 +683,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c03e94c86cd5/
changeset:   c03e94c86cd5
branch:      yt
user:        jzuhone
date:        2012-08-09 06:57:14
summary:     Merging
affected #:  6 files

diff -r 298587cec10f5fae81862ab241b7ea4944ba9841 -r c03e94c86cd5ec73681871f49360a998edc50740 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -417,6 +417,7 @@
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
 
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
@@ -437,6 +438,7 @@
 get_ytproject h5py-2.0.1.tar.gz
 get_ytproject Cython-0.16.tar.gz
 get_ytproject reason-js-20120623.zip
+get_ytproject Forthon-0.8.10.tar.gz
 
 if [ $INST_BZLIB -eq 1 ]
 then
@@ -674,6 +676,7 @@
 do_setup_py ipython-0.13
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.16
+do_setup_py Forthon-0.8.10
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 298587cec10f5fae81862ab241b7ea4944ba9841 -r c03e94c86cd5ec73681871f49360a998edc50740 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -28,7 +28,8 @@
       StreamGrid, \
       StreamHierarchy, \
       StreamStaticOutput, \
-      StreamHandler
+      StreamHandler, \
+      load_uniform_grid
 
 from .fields import \
       KnownStreamFields, \


diff -r 298587cec10f5fae81862ab241b7ea4944ba9841 -r c03e94c86cd5ec73681871f49360a998edc50740 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
 
 from .fields import \
     StreamFieldInfo, \
@@ -288,3 +290,89 @@
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False
+
+class StreamDictFieldHandler(dict):
+
+    @property
+    def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
+    r"""Load a uniform grid of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a uniform grid of data to be loaded directly into yt and
+    analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    domain_size_in_cm : float
+        The size of the domain, in centimeters
+
+    Examples
+    --------
+
+    >>> arr = na.random.random((256, 256, 256))
+    >>> data = dict(Density = arr)
+    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+    """
+    sfh = StreamDictFieldHandler()
+    sfh.update({0:data})
+    domain_dimensions = na.array(domain_dimensions)
+    if na.unique(domain_dimensions).size != 1:
+        print "We don't support variably sized domains yet."
+        raise RuntimeError
+    domain_left_edge = na.zeros(3, 'float64')
+    domain_right_edge = na.ones(3, 'float64')
+    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+    grid_levels = na.array([0], dtype='int32').reshape((1,1))
+    grid_dimensions = grid_right_edges - grid_left_edges
+
+    grid_left_edges  = grid_left_edges.astype("float64")
+    grid_left_edges /= domain_dimensions*2**grid_levels
+    grid_left_edges *= domain_right_edge - domain_left_edge
+    grid_left_edges += domain_left_edge
+
+    grid_right_edges  = grid_right_edges.astype("float64")
+    grid_right_edges /= domain_dimensions*2**grid_levels
+    grid_right_edges *= domain_right_edge - domain_left_edge
+    grid_right_edges += domain_left_edge
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        na.array([-1], dtype='int64'),
+        na.zeros(1, dtype='int64').reshape((1,1)),
+        na.zeros(1).reshape((1,1)),
+        sfh,
+    )
+
+    handler.name = "UniformGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = 0.0
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = domain_size_in_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
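
The edge arithmetic above maps integer cell counts onto code units.  A
minimal standalone sketch of the normalization for the single level-0 grid
case, simplified to a unit domain so the final scale-and-shift by the
domain edges is a no-op:

    import numpy as np

    domain_dimensions = np.array([256, 256, 256])
    grid_levels = np.array([[0]], dtype='int32')
    gre = np.array(domain_dimensions, 'int64').reshape((1, 3)).astype('float64')
    gre /= domain_dimensions * 2**grid_levels
    print gre    # [[ 1.  1.  1.]] -- the one grid spans the whole domain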


diff -r 298587cec10f5fae81862ab241b7ea4944ba9841 -r c03e94c86cd5ec73681871f49360a998edc50740 yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -9,9 +9,10 @@
 endif
 
 fKD: fKD.f90 fKD.v fKD_source.f90
-#	Forthon --compile_first fKD_source --no2underscores --with-numpy -g fKD fKD.f90 fKD_source.f90
+#	Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
 	@echo "Using $(FORTHON) ($(FORTHON_EXE))"
-	$(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --with-numpy --fopt "-O3" fKD fKD_source.f90
+	$(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
+	mv build/lib*/fKDpy.so .
 
 clean:
 	rm -rf build fKDpy.a fKDpy.so




diff -r 298587cec10f5fae81862ab241b7ea4944ba9841 -r c03e94c86cd5ec73681871f49360a998edc50740 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -458,10 +458,15 @@
             Log on/off.
 
         """
-        if log:
-            self._field_transform[field] = log_transform
+        if field == 'all':
+            fields = self.plots.keys()
         else:
-            self._field_transform[field] = linear_transform
+            fields = [field]
+        for field in fields:
+            if log:
+                self._field_transform[field] = log_transform
+            else:
+                self._field_transform[field] = linear_transform
 
     @invalidate_plot
     def set_transform(self, field, name):
@@ -472,34 +477,70 @@
     @invalidate_plot
     def set_cmap(self, field, cmap_name):
         """set the colormap for one of the fields
-        
+
         Parameters
         ----------
         field : string
-            the field to set a transform
+            the field to set the colormap
+            if field == 'all', applies to all plots.
         cmap_name : string
             name of the colormap
 
         """
-        self._colorbar_valid = False
-        self._colormaps[field] = cmap_name
+
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in fields:
+            self._colorbar_valid = False
+            self._colormaps[field] = cmap_name
 
     @invalidate_plot
-    def set_zlim(self, field, zmin, zmax):
+    def set_zlim(self, field, zmin, zmax, dynamic_range=None):
         """set the scale of the colormap
-        
+
         Parameters
         ----------
         field : string
-            the field to set a transform
+            the field to set a colormap scale
+            if field == 'all', applies to all plots.
         zmin : float
-            the new minimum of the colormap scale
+            the new minimum of the colormap scale. If 'min', will
+            set to the minimum value in the current view.
         zmax : float
-            the new maximum of the colormap scale
+            the new maximum of the colormap scale. If 'max', will
+            set to the maximum value in the current view.
+
+        Keyword Parameters
+        ------------------
+        dynamic_range : float (default: None)
+            The dynamic range of the image.
+            If zmin == None, will set zmin = zmax / dynamic_range
+            If zmax == None, will set zmax = zmin * dynamic_range
+            When dynamic_range is specified, defaults to setting
+            zmin = zmax / dynamic_range.
 
         """
-        self.plots[field].zmin = zmin
-        self.plots[field].zmax = zmax
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in fields:
+            myzmin = zmin
+            myzmax = zmax
+            if zmin == 'min':
+                myzmin = self.plots[field].image._A.min()
+            if zmax == 'max':
+                myzmax = self.plots[field].image._A.max()
+            if dynamic_range is not None:
+                if zmax is None:
+                    myzmax = myzmin * dynamic_range
+                else:
+                    myzmin = myzmax / dynamic_range
+
+            self.plots[field].zmin = myzmin
+            self.plots[field].zmax = myzmax
 
     def setup_callbacks(self):
         for key in callback_registry:
@@ -512,7 +553,7 @@
             callback = invalidate_plot(apply_callback(CallbackMaker))
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-        
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
@@ -651,25 +692,32 @@
     @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields
-        
+
         Parameters
         ----------
         field : string
             the field to set a transform
+            if field == 'all', applies to all plots.
         cmap_name : string
             name of the colormap
 
         """
-        self._colorbar_valid = False
-        self._colormaps[field] = cmap
-        if isinstance(cmap, types.StringTypes):
-            if str(cmap) in yt_colormaps:
-                cmap = yt_colormaps[str(cmap)]
-            elif hasattr(matplotlib.cm, cmap):
-                cmap = getattr(matplotlib.cm, cmap)
-        if not is_colormap(cmap) and cmap is not None:
-            raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
-        self.plots[field].image.set_cmap(cmap)
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+
+        for field in fields:
+            self._colorbar_valid = False
+            self._colormaps[field] = cmap
+            if isinstance(cmap, types.StringTypes):
+                if str(cmap) in yt_colormaps:
+                    cmap = yt_colormaps[str(cmap)]
+                elif hasattr(matplotlib.cm, cmap):
+                    cmap = getattr(matplotlib.cm, cmap)
+            if not is_colormap(cmap) and cmap is not None:
+                raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+            self.plots[field].image.set_cmap(cmap)
 
     def save(self,name=None):
         """saves the plot to disk.
@@ -762,7 +810,7 @@
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
              the middle of the domain.
-	width : tuple or a float.
+        width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
              
@@ -781,7 +829,7 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.  
-	origin : string
+        origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
              to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -830,7 +878,7 @@
             the image centers on the location of the maximum density
             cell.  If set to 'c' or 'center', the plot is centered on
             the middle of the domain.
-	width : tuple or a float.
+        width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
              
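
The new dynamic_range handling in set_zlim reduces to a small resolution
rule.  A standalone sketch of that rule (the function name is hypothetical,
not part of yt):

    def resolve_zlim(zmin, zmax, dynamic_range=None):
        # mirrors the logic above: an explicit zmax wins, and the missing
        # bound is derived from the other one via dynamic_range
        if dynamic_range is not None:
            if zmax is None:
                zmax = zmin * dynamic_range
            else:
                zmin = zmax / dynamic_range
        return zmin, zmax

    resolve_zlim(None, 1e-26, dynamic_range=1e4)   # -> (1e-30, 1e-26)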



https://bitbucket.org/yt_analysis/yt-3.0/changeset/166ef5a2bbab/
changeset:   166ef5a2bbab
branch:      yt
user:        jzuhone
date:        2012-08-10 01:53:27
summary:     Adding "sim_time" and "number_of_particles" as optional arguments to load_uniform_grid. With these we can add a simulation time and particles to the uniform grid.
affected #:  1 file

diff -r c03e94c86cd5ec73681871f49360a998edc50740 -r 166ef5a2bbab3c3fedab52c16a615d4735fd35a6 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -296,7 +296,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
+                      sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -316,13 +317,18 @@
         This is the domain dimensions of the grid
     domain_size_in_cm : float
         The size of the domain, in centimeters
-
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+        
     Examples
     --------
 
     >>> arr = na.random.random((256, 256, 256))
     >>> data = dict(Density = arr)
     >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+                
     """
     sfh = StreamDictFieldHandler()
     sfh.update({0:data})
@@ -354,7 +360,7 @@
         grid_dimensions,
         grid_levels,
         na.array([-1], dtype='int64'),
-        na.zeros(1, dtype='int64').reshape((1,1)),
+        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
         na.zeros(1).reshape((1,1)),
         sfh,
     )
@@ -365,7 +371,7 @@
     handler.refine_by = 2
     handler.dimensionality = 3
     handler.domain_dimensions = domain_dimensions
-    handler.simulation_time = 0.0
+    handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
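
For reference, a hypothetical call exercising both new keywords (the
particle field name and the sizes are illustrative only; this file still
imported numpy as na at this point):

    >>> arr = na.random.random((64, 64, 64))
    >>> data = dict(Density = arr,
    ...             particle_position_x = na.random.random(1000))
    >>> pf = load_uniform_grid(data, [64, 64, 64], 3.08e24,
    ...                        sim_time=1.0e17, number_of_particles=1000)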



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f5b355b6ad0d/
changeset:   f5b355b6ad0d
branch:      yt
user:        jzuhone
date:        2012-09-01 05:09:57
summary:     Getting rid of FLASH ParticleIO for now since it may be reincarnated in yt 3.0
affected #:  3 files

diff -r 166ef5a2bbab3c3fedab52c16a615d4735fd35a6 -r f5b355b6ad0d34f8bcc2d9f890114aae3ed56f60 yt/frontends/flash/_flash_particle_reader.pyx
--- a/yt/frontends/flash/_flash_particle_reader.pyx
+++ /dev/null
@@ -1,219 +0,0 @@
-import numpy as np
-cimport numpy as np
-cimport cython
-import h5py
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef particles_validator_region(np.ndarray[np.float64_t, ndim=1] x,
-                                np.ndarray[np.float64_t, ndim=1] y,
-                                np.ndarray[np.float64_t, ndim=1] z,
-                                np.ndarray[np.float64_t, ndim=1] left_edge,
-                                np.ndarray[np.float64_t, ndim=1] right_edge,
-                                np.int32_t periodic,
-                                np.ndarray[np.float64_t, ndim=1] DLE,
-                                np.ndarray[np.float64_t, ndim=1] DRE) :
-
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] mask
-    cdef int i, ax
-
-    mask = np.zeros(x.shape[0], 'bool')
-
-    cdef np.ndarray[np.float64_t, ndim=1] DW = np.zeros(3, 'float64')
-
-    if periodic == 1: 
-        DW[:] = DRE - DLE
-
-    cdef np.float64_t pos[3]
-    cdef int inside
-    for i in range(x.shape[0]):
-        pos[0] = x[i]
-        pos[1] = y[i]
-        pos[2] = z[i]
-        inside = 1
-        for ax in range(3):
-            if pos[ax] < left_edge[ax]: pos[ax] += DW[ax]
-            if pos[ax] > right_edge[ax]: pos[ax] -= DW[ax]
-        for ax in range(3):
-            if pos[ax] < left_edge[ax] or pos[ax] > right_edge[ax]:
-                inside = 0
-                break
-        if inside == 1:
-            mask[i] = 1
-
-    return mask
-
-cdef particles_validator_sphere(np.ndarray[np.float64_t, ndim=1] x,
-                                np.ndarray[np.float64_t, ndim=1] y, 
-                                np.ndarray[np.float64_t, ndim=1] z,
-                                np.ndarray[np.float64_t, ndim=1] center,
-                                np.float64_t radius,
-                                np.int32_t periodic,
-                                np.ndarray[np.float64_t, ndim=1] DLE,
-                                np.ndarray[np.float64_t, ndim=1] DRE) :
-
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
-
-    cdef np.ndarray[np.float64_t, ndim=1] r
-    cdef np.ndarray[np.float64_t, ndim=1] xx
-    cdef np.ndarray[np.float64_t, ndim=1] yy
-    cdef np.ndarray[np.float64_t, ndim=1] zz
-
-    cdef np.ndarray[np.float64_t, ndim=1] DW
-
-    idxs = np.zeros(x.shape[0], 'bool')
-    
-    r = np.zeros(x.shape[0], 'float64')
-    xx = np.zeros(x.shape[0], 'float64')
-    yy = np.zeros(x.shape[0], 'float64')
-    zz = np.zeros(x.shape[0], 'float64')
-
-    DW = np.zeros(3, 'float64')
-    
-    xx = np.abs(x-center[0])
-    yy = np.abs(y-center[1])
-    zz = np.abs(z-center[2])
-
-    if periodic == 1 : 
-
-        DW = DRE - DLE
-
-        xx = np.minimum(xx,DW[0]-xx)
-        yy = np.minimum(yy,DW[1]-yy)
-        zz = np.minimum(zz,DW[2]-zz)
-
-    r = np.sqrt(xx*xx+yy*yy+zz*zz)
-
-    idxs = np.array(r <= radius)
-    
-    return idxs
-
-cdef particles_validator_disk(np.ndarray[np.float64_t, ndim=1] x,
-                              np.ndarray[np.float64_t, ndim=1] y,
-                              np.ndarray[np.float64_t, ndim=1] z,
-                              np.ndarray[np.float64_t, ndim=1] center,
-                              np.ndarray[np.float64_t, ndim=1] normal,
-                              np.float64_t radius, np.float64_t height) :
-
-    cdef np.float64_t d
-
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
-
-    cdef np.ndarray[np.float64_t, ndim=1] ph
-    cdef np.ndarray[np.float64_t, ndim=1] pd2
-    cdef np.ndarray[np.float64_t, ndim=1] pr
-
-    idxs = np.zeros(x.shape[0], 'bool')
-    
-    ph = np.zeros(x.shape[0], 'float64')
-    pd2 = np.zeros(x.shape[0], 'float64')
-    pr = np.zeros(x.shape[0], 'float64')
-    
-    d = -np.dot(normal*center)
-
-    ph = np.abs(x*normal[0] + y*normal[1] + z*normal[2] + d)
-    pd2 = (x-center[0])**2+(y-center[1])**2+(z-center[2])**2
-
-    pr = np.sqrt(pd2-ph*ph)
-
-    idxs = np.logical_and(pr <= radius, ph <= height)
-    
-    return idxs
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def read_particles(file_id, int x_index, int y_index, int z_index,
-                   int num_fields, int rtype, args,
-                   np.ndarray[np.int32_t, ndim=1] field_indices) :
-
-    cdef np.ndarray[np.uint8_t, cast=True, ndim=1] idxs
-    cdef int i
-    cdef int num_particles
-    cdef np.int32_t periodic
-    cdef np.ndarray[np.float64_t, ndim=1] left_edge
-    cdef np.ndarray[np.float64_t, ndim=1] right_edge
-    cdef np.ndarray[np.float64_t, ndim=1] DLE
-    cdef np.ndarray[np.float64_t, ndim=1] DRE
-    cdef np.float64_t radius
-    cdef np.float64_t height
-    cdef np.ndarray[np.float64_t, ndim=1] normal
-    cdef np.ndarray[np.float64_t, ndim=1] center
-    cdef np.ndarray[np.float64_t, ndim=1] particle_field
-    cdef np.ndarray[np.float64_t, ndim=1] posx
-    cdef np.ndarray[np.float64_t, ndim=1] posy
-    cdef np.ndarray[np.float64_t, ndim=1] posz
-
-    left_edge = np.zeros(3, 'float64')
-    right_edge = np.zeros(3, 'float64')
-    DLE = np.zeros(3, 'float64')
-    DRE = np.zeros(3, 'float64')
-    normal = np.zeros(3, 'float64')
-    center = np.zeros(3, 'float64')
-
-    dataset = h5py.h5d.open(file_id, "tracer particles")
-    dataspace = dataset.get_space()
-    rank = dataspace.get_simple_extent_dims()
-    memspace = h5py.h5s.create_simple((rank[0],))
-
-    num_particles = rank[0]
-    count = (num_particles,1)
-
-    posx = np.zeros(num_particles, 'float64')
-    posy = np.zeros(num_particles, 'float64')
-    posz = np.zeros(num_particles, 'float64')
-
-    start = (0,x_index)
-    dataspace.select_hyperslab(start,count)
-    dataset.read(memspace, dataspace, posx)
-
-    start = (0,y_index)
-    dataspace.select_hyperslab(start,count)
-    dataset.read(memspace, dataspace, posy)
-
-    start = (0,z_index)
-    dataspace.select_hyperslab(start,count)
-    dataset.read(memspace, dataspace, posz)
-    
-    idxs = np.zeros(num_particles, 'bool')
-
-    particle_field = np.zeros(num_particles, 'float64')
-    
-    if rtype == 0 :
-        left_edge = args[0]
-        right_edge = args[1]
-        periodic = args[2]
-        DLE = args[3]
-        DRE = args[4]
-        idxs = particles_validator_region(posx,posy,posz,
-                                          left_edge,right_edge,
-                                          periodic,DLE,DRE)
-    elif rtype == 1:
-        center = args[0]
-        radius = args[1]
-        periodic = args[2]
-        DLE = args[3]
-        DRE = args[4]
-        idxs = particles_validator_sphere(posx,posy,posz,
-                                          center,radius,
-                                          periodic,DLE,DRE)
-    elif rtype == 2:
-        center = args[0]
-        normal = args[1]
-        radius = args[2]
-        height = args[3]
-        idxs = particles_validator_disk(posx,posy,posz,
-                                        center,normal,
-                                        radius,height)
-
-    _particles = []
-
-    for i in range(num_fields) :
-
-        start = (0,field_indices[i])
-        dataspace.select_hyperslab(start,count)
-        dataset.read(memspace, dataspace, particle_field)
-        _particles.append(particle_field[idxs])
-        
-    return _particles
-    


diff -r 166ef5a2bbab3c3fedab52c16a615d4735fd35a6 -r f5b355b6ad0d34f8bcc2d9f890114aae3ed56f60 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -25,13 +25,12 @@
 
 import numpy as na
 import h5py
-from yt.frontends.flash import _flash_particle_reader
 
 from yt.utilities.io_handler import \
     BaseIOHandler
 
 class IOHandlerFLASH(BaseIOHandler):
-    _particle_reader = True
+    _particle_reader = False
     _data_style = "flash_hdf5"
 
     def __init__(self, pf, *args, **kwargs):
@@ -50,17 +49,7 @@
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
-        fx = self._particle_fields["particle_posx"]
-        fy = self._particle_fields["particle_posy"]
-        fz = self._particle_fields["particle_posz"]
-        field_indices = na.array([self._particle_fields[field]
-                                  for field in fields_to_read],
-                                 dtype='int32')
-        return _flash_particle_reader.read_particles(self._handle.fid,
-                                                     fx, fy, fz,
-                                                     len(fields_to_read),
-                                                     type, args,
-                                                     field_indices)
+        pass
 
     def _read_data_set(self, grid, field):
         f = self._handle


diff -r 166ef5a2bbab3c3fedab52c16a615d4735fd35a6 -r f5b355b6ad0d34f8bcc2d9f890114aae3ed56f60 yt/frontends/flash/setup.py
--- a/yt/frontends/flash/setup.py
+++ b/yt/frontends/flash/setup.py
@@ -8,10 +8,6 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('flash', parent_package, top_path)
-    config.add_extension("_flash_particle_reader",
-                         ["yt/frontends/flash/_flash_particle_reader.pyx"],
-                         language="c"
-                         )
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b0690ab390d0/
changeset:   b0690ab390d0
branch:      yt
user:        jzuhone
date:        2012-09-01 05:15:23
summary:     Changed the order of units setup and comoving units setup so that the latter always goes last and doesn't get overridden.

Fixing the conversion for velocity units as well.
affected #:  1 file

diff -r f5b355b6ad0d34f8bcc2d9f890114aae3ed56f60 -r b0690ab390d0128e3ddc0456b313448eca70a54c yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -229,13 +229,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -252,10 +252,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
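
The reordering above matters because both setup paths write into the same
units dictionary, so whichever runs second wins. A minimal sketch of the
idea, with a toy class and stand-in values (not the yt source):

    class Units(object):
        def __init__(self, cosmological, current_redshift):
            self.units = {}
            self.current_redshift = current_redshift
            self._setup_cgs_units()           # base units assigned first ...
            if cosmological:
                self._setup_comoving_units()  # ... so this correction survives

        def _setup_cgs_units(self):
            self.units['kpc'] = 3.0857e21     # cm per kpc (illustrative)

        def _setup_comoving_units(self):
            # apply the cosmological correction on top of the base values;
            # running this first would leave nothing to correct
            for unit in self.units:
                self.units[unit] /= (1.0 + self.current_redshift)

With the old ordering, the base setup ran second and overwrote the comoving
values outright.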



https://bitbucket.org/yt_analysis/yt-3.0/changeset/68e12d11d7a4/
changeset:   68e12d11d7a4
branch:      yt
user:        jzuhone
date:        2012-09-01 07:21:23
summary:     Applying the scale factor to the units so that any units other than '1' refer to proper distances.

Also changed the units of the Hubble parameter from H0 in s^-1 to the dimensionless h.
affected #:  1 file

diff -r b0690ab390d0128e3ddc0456b313448eca70a54c -r 68e12d11d7a473853913fd25ce422e0fd42ccacf yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -39,7 +39,7 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
      ValidateDataField
@@ -265,7 +265,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -407,6 +408,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
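
The 'h' conversion is a straightforward chain of unit factors; a worked
check with illustrative round numbers (the constant matches cm_per_mpc):

    # H0 [s^-1] * cm_per_mpc  -> cm/s per Mpc
    #           * 1.0e-5      -> km/s per Mpc
    #           * 1.0e-2      -> units of 100 km/s/Mpc, i.e. little-h
    cm_per_mpc = 3.0857e24
    H0 = 2.27e-18                   # s^-1, roughly 70 km/s/Mpc
    h = H0 * cm_per_mpc * 1.0e-5 * 1.0e-2
    print(h)                        # ~0.70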



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e0af089357f3/
changeset:   e0af089357f3
branch:      yt
user:        jzuhone
date:        2012-09-01 07:35:23
summary:     Reverting my changes, which I never intended to commit
affected #:  1 file

diff -r 68e12d11d7a473853913fd25ce422e0fd42ccacf -r e0af089357f3f5360d748620a098b330443056b9 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -80,8 +80,7 @@
         indices.sort() # Just in case the caller wasn't careful
         
         self.field_data = YTFieldData()
-        #self.pfs = TimeSeriesData.from_filenames(filenames)
-        self.pfs = [load(fn) for fn in filenames]
+        self.pfs = TimeSeriesData.from_filenames(filenames)
         self.masks = []
         self.sorts = []
         self.indices = indices
@@ -114,14 +113,14 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            #if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
-            #    print "Not all requested particle ids contained in this file!"
-            #    raise IndexError
-            #mask = na.in1d(newtags, indices, assume_unique=True)
-            #sorts = na.argsort(newtags[mask])
-            #self.masks.append(mask)            
-            #self.sorts.append(sorts)
-            #self.times.append(pf.current_time)
+            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+                print "Not all requested particle ids contained in this file!"
+                raise IndexError
+            mask = na.in1d(newtags, indices, assume_unique=True)
+            sorts = na.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
 
         self.times = na.array(self.times)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/51341cebfb77/
changeset:   51341cebfb77
branch:      yt
user:        jzuhone
date:        2012-09-01 07:36:23
summary:     And another
affected #:  1 file

diff -r e0af089357f3f5360d748620a098b330443056b9 -r 51341cebfb77402c1386aafb2a56704e8137741d yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -23,7 +23,6 @@
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import TimeSeriesData
 from yt.utilities.lib import sample_field_at_positions
-from yt.convenience import load
 from yt.funcs import *
 
 import numpy as na



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4561e3ff9d02/
changeset:   4561e3ff9d02
branch:      yt
user:        MatthewTurk
date:        2012-09-01 19:21:28
summary:     Merged in jzuhone/yt (pull request #260)
affected #:  5 files



diff -r b768f6d5c7756bd548c02011354f86675ffb21c1 -r 4561e3ff9d02043ae20e66fc2623c82b69f5ef42 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -39,7 +39,7 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
      ValidateDataField
@@ -229,13 +229,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -252,10 +252,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
@@ -265,7 +265,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -407,6 +408,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0


diff -r b768f6d5c7756bd548c02011354f86675ffb21c1 -r 4561e3ff9d02043ae20e66fc2623c82b69f5ef42 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -51,23 +51,6 @@
             count_list, conv_factors):
         pass
 
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return na.concatenate(tr)
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:







https://bitbucket.org/yt_analysis/yt-3.0/changeset/9ee80a7e4bb4/
changeset:   9ee80a7e4bb4
branch:      yt
user:        xarthisius
date:        2012-08-31 15:07:50
summary:     Draw grid ids only on the highest available level of refinement
affected #:  1 file

diff -r 327b4358244afdbf8507002e02c79d4ea9b3dd1d -r 9ee80a7e4bb4432b46cd29b46f0e0deea7e505ea yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -348,9 +348,13 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                ids = [g.id for g in plot.data._grids]
-                for n in visible.nonzero()[0]:
-                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n],clip_on=True)
+                visible_gids = visible.nonzero()[0]
+                ids = [g.id for g in plot.data._grids
+                       if na.any(g.child_mask) and g.id in visible_gids]
+                for gid in ids:
+                    plot._axes.text(left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
+                                    left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
+                                    gid, clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8a7c75b0a9c7/
changeset:   8a7c75b0a9c7
branch:      yt
user:        xarthisius
date:        2012-08-31 18:26:30
summary:     Check the size of AMRPatch.Children instead of doing a logical operation on AMRPatch.child_mask. Thanks to Matt for the tip.
affected #:  1 file

diff -r 9ee80a7e4bb4432b46cd29b46f0e0deea7e505ea -r 8a7c75b0a9c75976bc58bf2661f9d42931036f44 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -350,7 +350,7 @@
             if self.draw_ids:
                 visible_gids = visible.nonzero()[0]
                 ids = [g.id for g in plot.data._grids
-                       if na.any(g.child_mask) and g.id in visible_gids]
+                       if len(g.Children)==0 and g.id in visible_gids]
                 for gid in ids:
                     plot._axes.text(left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
                                     left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5d3a357b5897/
changeset:   5d3a357b5897
branch:      yt
user:        xarthisius
date:        2012-08-31 20:37:30
summary:     Use an explicit loop over visible grids, with an additional if statement that checks whether the grid has overlapping regions
affected #:  1 file

diff -r 8a7c75b0a9c75976bc58bf2661f9d42931036f44 -r 5d3a357b589757d8f21c80abfc2f29dded9874b5 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -348,13 +348,13 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                visible_gids = visible.nonzero()[0]
-                ids = [g.id for g in plot.data._grids
-                       if len(g.Children)==0 and g.id in visible_gids]
-                for gid in ids:
-                    plot._axes.text(left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
-                                    left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
-                                    gid, clip_on=True)
+                for i, g in enumerate(plot.data._grids[visible]):
+                    if na.any(g.child_mask):
+                        gid = na.where(visible)[0][i]
+                        plot._axes.text(
+                                left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
+                                left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
+                                g.id, clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):
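
The gid lookup here leans on a standard NumPy idiom: select items with a
boolean mask, then map each selected item back to its original index via
na.where. A standalone toy illustration (numpy aliased to na, matching the
codebase):

    import numpy as na

    grids = na.array(['g0', 'g1', 'g2', 'g3'])
    visible = na.array([True, False, True, True])

    for i, g in enumerate(grids[visible]):
        gid = na.where(visible)[0][i]   # index of g in the original array
        print(gid, g)                   # prints 0/2/3 paired with g0/g2/g3

Both boolean indexing and na.where return items in ascending index order,
which is what makes the pairing valid.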



https://bitbucket.org/yt_analysis/yt-3.0/changeset/80ff2fb3b5eb/
changeset:   80ff2fb3b5eb
branch:      yt
user:        ngoldbaum
date:        2012-09-02 23:57:15
summary:     Merged in xarthisius/yt (pull request #257)
affected #:  1 file

diff -r 4561e3ff9d02043ae20e66fc2623c82b69f5ef42 -r 80ff2fb3b5eb96f600df3da184d908b6ed4d81eb yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -348,9 +348,13 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                ids = [g.id for g in plot.data._grids]
-                for n in visible.nonzero()[0]:
-                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n],clip_on=True)
+                for i, g in enumerate(plot.data._grids[visible]):
+                    if na.any(g.child_mask):
+                        gid = na.where(visible)[0][i]
+                        plot._axes.text(
+                                left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
+                                left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
+                                g.id, clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/dd13ee5cec63/
changeset:   dd13ee5cec63
branch:      yt
user:        samskillman
date:        2012-08-29 23:48:59
summary:     A new way to size the plot window frames so that they keep the same final image size for a given aspect ratio.  The calculation is a bit messy, but it now avoids relying on bbox_inches='tight'.
affected #:  1 file

diff -r a7570ca33e168bbf7b9a9f4f399582b38632cc0a -r dd13ee5cec63c0625fb66438ff7c73de5cc6de2c yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -685,10 +685,12 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
@@ -1184,10 +1186,13 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
+        fsize, axrect, caxrect = self._get_best_layout(size)
         # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        
+        self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                               frameon = True)
+        self.axes = self.figure.add_axes(axrect)
+        self.cax = self.figure.add_axes(caxrect)
 
     def save(self, name, canvas = None):
         if name[-4:] == '.png':
@@ -1206,9 +1211,47 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
+        canvas.print_figure(fn)
         return fn
 
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 0.7/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
+
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
         f = cStringIO.StringIO()
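
To make the layout math concrete, here is an illustrative trace of
_get_best_layout for a square 10x10 inch request (values rounded; the
constants mirror the diff above):

    size = (10.0, 10.0)
    cbar_inches = 0.7
    newsize = [size[0] + cbar_inches, size[1]]   # [10.7, 10.0]
    text_buffx = 0.7 / newsize[0]                # ~0.065 of figure width
    text_bottomy = 0.7 / size[1]                 # 0.07 of figure height
    text_topy = 0.3 / size[1]                    # 0.03
    cbar_frac = cbar_inches / newsize[0]         # ~0.065
    yfrac = 1.0 - text_bottomy - text_topy       # 0.90
    ysize = yfrac * size[1]                      # 9.0 inches of axes
    xsize = (1.0 * size[0] / size[1]) * ysize    # 9.0, aspect preserved
    xfrac = xsize / newsize[0]                   # ~0.84
    xbig = xfrac + text_buffx + 2.0 * cbar_frac  # ~1.04 > 1, so the axes
    xsize /= xbig; ysize /= xbig                 # shrink by ~4% to fit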



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ecb1b4c43077/
changeset:   ecb1b4c43077
branch:      yt
user:        samskillman
date:        2012-09-01 03:57:43
summary:     Attempt to give a bit more buffer on the left side, and go back to only making the images smaller than 10x10 inches rather than increasing one of the lengths.  This helps keep the text sizes large enough to read.
affected #:  1 file

diff -r dd13ee5cec63c0625fb66438ff7c73de5cc6de2c -r ecb1b4c4307763f14b9762eeba604d0238900da3 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -688,9 +688,9 @@
             norm_size = 10.0
             cbar_frac = 0.0
             if aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            else:
                 size = (aspect*norm_size*(1.+cbar_frac), norm_size)
-            else:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
@@ -1222,7 +1222,7 @@
         newsize = [size[0] + cbar_inches, size[1]]
         
         # add buffers for text, and a bit of whitespace on top
-        text_buffx = 0.7/(newsize[0])
+        text_buffx = 1.0/(newsize[0])
         text_bottomy = 0.7/size[1]
         text_topy = 0.3/size[1]
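
With the branches swapped, both cases now shrink one dimension below
norm_size instead of growing the other; a quick illustrative check:

    norm_size, cbar_frac = 10.0, 0.0
    for aspect in (2.0, 0.5):                    # wide image, then tall
        if aspect > 1.0:
            size = (norm_size * (1. + cbar_frac), norm_size / aspect)
        else:
            size = (aspect * norm_size * (1. + cbar_frac), norm_size)
        print(size)                              # (10.0, 5.0), then (5.0, 10.0)

Neither dimension ever exceeds 10 inches, which keeps the fixed-point text
readable relative to the figure.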
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e12ec7dbed9b/
changeset:   e12ec7dbed9b
branch:      yt
user:        samskillman
date:        2012-09-03 13:40:48
summary:     Switching to scientific notation a bit earlier so as not to push values off the edge. Thanks, ngoldbaum.
affected #:  1 file

diff -r ecb1b4c4307763f14b9762eeba604d0238900da3 -r e12ec7dbed9b0ca6553545c227efc5db6e9426e7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1273,3 +1273,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+
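
ticklabel_format(scilimits=(m, n)) tells matplotlib's ScalarFormatter to
switch tick labels to scientific notation whenever the axis range falls
outside 10**m .. 10**n, so (-4, 3) triggers it sooner than the defaults. A
self-contained example in the same Figure/FigureCanvasAgg style yt uses:

    import matplotlib.figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    fig = matplotlib.figure.Figure()
    ax = fig.add_subplot(111)
    ax.plot([0.0, 1.0], [0.0, 5.0e4])
    ax.ticklabel_format(scilimits=(-4, 3))  # 5e4 > 10**3: labels become
                                            # '5 x 10^4' style, not '50000'
    FigureCanvasAgg(fig).print_figure('scilimits_demo.png')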



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3840366c0379/
changeset:   3840366c0379
branch:      yt
user:        ngoldbaum
date:        2012-09-03 13:48:28
summary:     Merged in samskillman/yt (pull request #253)
affected #:  1 file

diff -r 80ff2fb3b5eb96f600df3da184d908b6ed4d81eb -r 3840366c03796c355a7043f43f6a135f35211be3 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -708,10 +708,12 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
@@ -1207,10 +1209,13 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
+        fsize, axrect, caxrect = self._get_best_layout(size)
         # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        
+        self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                               frameon = True)
+        self.axes = self.figure.add_axes(axrect)
+        self.cax = self.figure.add_axes(caxrect)
 
     def save(self, name, canvas = None):
         if name[-4:] == '.png':
@@ -1229,9 +1234,47 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
+        canvas.print_figure(fn)
         return fn
 
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
+
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
         f = cStringIO.StringIO()
@@ -1253,3 +1296,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f1f5b5c886d6/
changeset:   f1f5b5c886d6
branch:      yt
user:        ngoldbaum
date:        2012-09-02 23:51:35
summary:     Merging.
affected #:  1 file

diff -r 4561e3ff9d02043ae20e66fc2623c82b69f5ef42 -r f1f5b5c886d6d60d4ae96b60a73f809822d2651d yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -348,9 +348,13 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                ids = [g.id for g in plot.data._grids]
-                for n in visible.nonzero()[0]:
-                    plot._axes.text(left_edge_x[n]+(2*(xx1-xx0)/xpix),left_edge_y[n]+(2*(yy1-yy0)/ypix),ids[n],clip_on=True)
+                for i, g in enumerate(plot.data._grids[visible]):
+                    if na.any(g.child_mask):
+                        gid = na.where(visible)[0][i]
+                        plot._axes.text(
+                                left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
+                                left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
+                                g.id, clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6f63804e2143/
changeset:   6f63804e2143
branch:      yt
user:        ngoldbaum
date:        2012-09-02 23:59:26
summary:     Merging.
affected #:  1 file

diff -r 80ff2fb3b5eb96f600df3da184d908b6ed4d81eb -r 6f63804e21438fe998a507a9f1361100c701e9bc yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -708,10 +708,12 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
@@ -1207,10 +1209,13 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
+        fsize, axrect, caxrect = self._get_best_layout(size)
         # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        
+        self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                               frameon = True)
+        self.axes = self.figure.add_axes(axrect)
+        self.cax = self.figure.add_axes(caxrect)
 
     def save(self, name, canvas = None):
         if name[-4:] == '.png':
@@ -1229,9 +1234,47 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
+        canvas.print_figure(fn)
         return fn
 
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
+
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
         f = cStringIO.StringIO()



https://bitbucket.org/yt_analysis/yt-3.0/changeset/010761fec279/
changeset:   010761fec279
branch:      yt
user:        ngoldbaum
date:        2012-09-02 23:59:35
summary:     Merging.
affected #:  0 files


https://bitbucket.org/yt_analysis/yt-3.0/changeset/a7adfaeec819/
changeset:   a7adfaeec819
branch:      yt
user:        ngoldbaum
date:        2012-09-03 01:10:48
summary:     Only plot ids for grids that are (roughly) bigger than the annotation.  I use 20 pixels; I guess we could do something fancier, but this choice seems to work.
affected #:  1 file

diff -r 010761fec27923eee26a306495ab4e5164b5d651 -r a7adfaeec8198eaf8307d42d33395655faa0979e yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -295,7 +295,7 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, draw_ids=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
         """
         annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
@@ -306,6 +306,7 @@
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
+        self.min_pix_ids = min_pix_ids
         self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
@@ -348,9 +349,11 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                for i, g in enumerate(plot.data._grids[visible]):
+                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
+                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                for i, g in enumerate(plot.data._grids[visible_ids]):
                     if na.any(g.child_mask):
-                        gid = na.where(visible)[0][i]
+                        gid = na.where(visible_ids)[0][i]
                         plot._axes.text(
                                 left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
                                 left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8e367bcb5612/
changeset:   8e367bcb5612
branch:      yt
user:        ngoldbaum
date:        2012-09-03 01:12:01
summary:     Documenting min_pix_ids
affected #:  1 file

diff -r a7adfaeec8198eaf8307d42d33395655faa0979e -r 8e367bcb5612ce3af7aaa1dc1252ff8c5d2c3ba0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -302,6 +302,7 @@
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cuttoff for display is at *min_pix* wide.
         *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha



https://bitbucket.org/yt_analysis/yt-3.0/changeset/487adfd5ee1d/
changeset:   487adfd5ee1d
branch:      yt
user:        ngoldbaum
date:        2012-09-03 01:27:48
summary:     Allowing the axes unit to be set in the plot window constructors.
affected #:  1 file

diff -r 8e367bcb5612ce3af7aaa1dc1252ff8c5d2c3ba0 -r 487adfd5ee1d935938f33afb872dd829e4c49a45 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -189,8 +189,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -451,16 +451,17 @@
         pass
 
 class PWViewer(PlotWindow):
+    _unit = None
+    _colormaps = defaultdict(lambda: 'algae')
+    _callbacks = []
+    _field_transform = {}
     """A viewer for PlotWindows.
 
     """
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
                 self._field_transform[field] = log_transform
@@ -577,7 +578,6 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
-    _unit = None
     @invalidate_plot
     def set_axes_unit(self, unit_name):
         r"""Set the unit for display on the x and y axes of the image.
@@ -587,7 +587,7 @@
         unit_name : string
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
-            units will be reset.
+            units will be reset.  If the unit is None, the default is chosen.
 
         Raises
         ------
@@ -850,7 +850,8 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -895,6 +896,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.  
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -916,9 +920,10 @@
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -964,6 +969,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -989,9 +997,11 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1019,6 +1029,9 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         north-vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -1030,6 +1043,7 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>
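
A hedged usage sketch of the new axes_unit keyword (the dataset path is
again a stand-in):

    from yt.mods import *
    pf = load("galaxy0030/galaxy0030")    # illustrative dataset path
    # Tick labels on the x and y axes will now read in kiloparsecs:
    p = ProjectionPlot(pf, 'x', 'Density', axes_unit='kpc')
    p.save()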



https://bitbucket.org/yt_analysis/yt-3.0/changeset/15008795b735/
changeset:   15008795b735
branch:      yt
user:        MatthewTurk
date:        2012-09-03 14:06:15
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #261)
affected #:  2 files

diff -r 3840366c03796c355a7043f43f6a135f35211be3 -r 15008795b7351ee3024db8fa4506abde8d502b89 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -295,17 +295,19 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, draw_ids=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
         """
         annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cuttoff for display is at *min_pix* wide.
         *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
+        self.min_pix_ids = min_pix_ids
         self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
@@ -348,9 +350,11 @@
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.draw_ids:
-                for i, g in enumerate(plot.data._grids[visible]):
+                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
+                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                for i, g in enumerate(plot.data._grids[visible_ids]):
                     if na.any(g.child_mask):
-                        gid = na.where(visible)[0][i]
+                        gid = na.where(visible_ids)[0][i]
                         plot._axes.text(
                                 left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
                                 left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),


diff -r 3840366c03796c355a7043f43f6a135f35211be3 -r 15008795b7351ee3024db8fa4506abde8d502b89 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -189,8 +189,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -451,16 +451,17 @@
         pass
 
 class PWViewer(PlotWindow):
+    _unit = None
+    _colormaps = defaultdict(lambda: 'algae')
+    _callbacks = []
+    _field_transform = {}
     """A viewer for PlotWindows.
 
     """
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
                 self._field_transform[field] = log_transform
@@ -577,7 +578,6 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
-    _unit = None
     @invalidate_plot
     def set_axes_unit(self, unit_name):
         r"""Set the unit for display on the x and y axes of the image.
@@ -587,7 +587,7 @@
         unit_name : string
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
-            units will be reset.
+            units will be reset.  If the unit is None, the default is chosen.
 
         Raises
         ------
@@ -850,7 +850,8 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -895,6 +896,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.  
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -916,9 +920,10 @@
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -964,6 +969,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -989,9 +997,11 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1019,6 +1029,9 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         north-vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -1030,6 +1043,7 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3abe9786ac49/
changeset:   3abe9786ac49
branch:      yt
user:        ngoldbaum
date:        2012-09-03 16:18:55
summary:     Adding a universal grid ID field, which I use to see whether grids overlap with a plot window.
affected #:  2 files

diff -r 487adfd5ee1d935938f33afb872dd829e4c49a45 -r 3abe9786ac4961a02f6bfd38630fa051c693fcf0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -107,6 +107,12 @@
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
+def _GridID(field, data):
+    return na.ones(data.ActiveDimensions)*(data.id)
+add_field("GridID", function=_GridID,
+          validators=[ValidateGridType(),
+                      ValidateSpatial(0)])
+
 def _GridIndices(field, data):
     return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,


diff -r 487adfd5ee1d935938f33afb872dd829e4c49a45 -r 3abe9786ac4961a02f6bfd38630fa051c693fcf0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -352,13 +352,13 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                for i, g in enumerate(plot.data._grids[visible_ids]):
-                    if na.any(g.child_mask):
-                        gid = na.where(visible_ids)[0][i]
-                        plot._axes.text(
-                                left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
-                                left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
-                                g.id, clip_on=True)
+                id_mask = plot.data['GridID']
+                active_ids = na.where(na.histogram(id_mask, bins=(id_mask.max()+1),range=(0,id_mask.max()+1))[0])[0] 
+                for i in na.where(visible_ids)[0]:
+                    plot._axes.text(
+                        left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
+                        left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
+                        active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8fbe7d0e5642/
changeset:   8fbe7d0e5642
branch:      yt
user:        ngoldbaum
date:        2012-09-03 16:32:32
summary:     Removing GridID since it is wrong and redundant (Thanks MatthewTurk!).
Also using na.unique to find the active grid ids and forcing the IDs
to be formatted as strings.
affected #:  2 files

diff -r 3abe9786ac4961a02f6bfd38630fa051c693fcf0 -r 8fbe7d0e56429dbd3bd0bd2f81f15f4188c79c6c yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -107,12 +107,6 @@
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
-def _GridID(field, data):
-    return na.ones(data.ActiveDimensions)*(data.id)
-add_field("GridID", function=_GridID,
-          validators=[ValidateGridType(),
-                      ValidateSpatial(0)])
-
 def _GridIndices(field, data):
     return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,


diff -r 3abe9786ac4961a02f6bfd38630fa051c693fcf0 -r 8fbe7d0e56429dbd3bd0bd2f81f15f4188c79c6c yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -352,13 +352,12 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                id_mask = plot.data['GridID']
-                active_ids = na.where(na.histogram(id_mask, bins=(id_mask.max()+1),range=(0,id_mask.max()+1))[0])[0] 
+                active_ids = na.unique(plot.data['GridIndices'])
                 for i in na.where(visible_ids)[0]:
                     plot._axes.text(
                         left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
                         left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
-                        active_ids[i], clip_on=True)
+                        "%d" % active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):
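
na.unique returns the sorted distinct values of an array, so applied to the
pixelized 'GridIndices' field it yields exactly the ids of grids covering
the window. A toy illustration of the pattern (values invented):

    import numpy as na

    id_field = na.array([3., 3., 7., 7., 12.])   # stand-in 'GridIndices'
    active_ids = na.unique(id_field)             # -> array([  3.,   7.,  12.])
    labels = ["%d" % i for i in active_ids]      # formatted as strings
    print(labels)                                # ['3', '7', '12']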



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3e35b1adb541/
changeset:   3e35b1adb541
branch:      yt
user:        MatthewTurk
date:        2012-09-03 20:06:36
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #262)
affected #:  2 files



diff -r 15008795b7351ee3024db8fa4506abde8d502b89 -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -352,13 +352,12 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                for i, g in enumerate(plot.data._grids[visible_ids]):
-                    if na.any(g.child_mask):
-                        gid = na.where(visible_ids)[0][i]
-                        plot._axes.text(
-                                left_edge_x[gid] + (2 * (xx1 - xx0) / xpix),
-                                left_edge_y[gid] + (2 * (yy1 - yy0) / ypix),
-                                g.id, clip_on=True)
+                active_ids = na.unique(plot.data['GridIndices'])
+                for i in na.where(visible_ids)[0]:
+                    plot._axes.text(
+                        left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
+                        left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
+                        "%d" % active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/37540bd65173/
changeset:   37540bd65173
branch:      yt
user:        samskillman
date:        2012-08-31 21:21:51
summary:     Adding preliminary Athena support.  From what I can tell, all basic functionality works.  It is fragile with respect to how you lay out your data.
affected #:  11 files
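
For orientation, a hypothetical loading sketch against the new frontend in
the diff below; the filename and call pattern are assumptions, not part of
this commit:

    # The dataset path is a stand-in, and the constructor is assumed to
    # follow yt's usual StaticOutput(filename) convention.
    from yt.frontends.athena.api import AthenaStaticOutput
    pf = AthenaStaticOutput("id0/kh.0010.vtk")
    pf.h.print_stats()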



diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/api.py
--- /dev/null
+++ b/yt/frontends/athena/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.athena
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      AthenaGrid, \
+      AthenaHierarchy, \
+      AthenaStaticOutput
+
+from .fields import \
+      AthenaFieldInfo, \
+      KnownAthenaFields, \
+      add_athena_field
+
+from .io import \
+      IOHandlerAthena


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena/data_structures.py
@@ -0,0 +1,396 @@
+"""
+Data structures for Athena.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as na
+import weakref
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.data_objects.hierarchy import \
+           AMRHierarchy
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import AthenaFieldInfo, KnownAthenaFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+import pdb
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class AthenaGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        df = hierarchy.storage_filename
+        if id == 0:
+            gname = 'id0/' + df + '.vtk'
+        else:
+            gname = 'id%i/' % id + df[:-5] + '-id%i'%id + df[-5:] + '.vtk'
+        AMRGridPatch.__init__(self, id, filename = gname,
+                              hierarchy = hierarchy)
+        self.filename = gname
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+def parse_line(line, grid):
+    # grid is a dictionary
+    splitup = line.strip().split()
+    if "vtk" in splitup:
+        grid['vtk_version'] = splitup[-1]
+    elif "Really" in splitup:
+        grid['time'] = splitup[-1]
+    elif 'PRIMITIVE' in splitup:
+        grid['time'] = float(splitup[4].rstrip(','))
+        grid['level'] = int(splitup[6].rstrip(','))
+        grid['domain'] = int(splitup[8].rstrip(','))
+    elif "DIMENSIONS" in splitup:
+        grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+    elif "ORIGIN" in splitup:
+        grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+    elif "SPACING" in splitup:
+        grid['dds'] = na.array(splitup[-3:]).astype('float64')
+    elif "CELL_DATA" in splitup:
+        grid["ncells"] = int(splitup[-1])
+    elif "SCALARS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif "VECTORS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+
+
+
+class AthenaHierarchy(AMRHierarchy):
+
+    grid = AthenaGrid
+    _data_style='athena'
+    
+    def __init__(self, pf, data_style='athena'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.storage_filename = self.parameter_file.storage_filename
+        self.hierarchy_filename = self.parameter_file.filename
+        #self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = file(self.hierarchy_filename,'rb')
+        AMRHierarchy.__init__(self,pf,data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        field_map = {}
+        f = open(self.hierarchy_filename,'rb')
+        line = f.readline()
+        while line != '':
+            splitup = line.strip().split()
+            if "DIMENSIONS" in splitup:
+                grid_dims = na.array(splitup[-3:]).astype('int')
+                line = f.readline()
+                continue
+            elif "CELL_DATA" in splitup:
+                grid_ncells = int(splitup[-1])
+                line = f.readline()
+                if na.prod(grid_dims) != grid_ncells:
+                    grid_dims -= 1
+                    grid_dims[grid_dims==0]=1
+                if na.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                          (na.prod(grid_dims), grid_ncells))
+                    raise TypeError
+                break
+            else:
+                del line
+                line = f.readline()
+        read_table = False
+        read_table_offset = f.tell()
+        while line != '':
+            if len(line) == 0: break
+            splitup = line.strip().split()
+            if 'SCALARS' in splitup:
+                field = splitup[1]
+                if not read_table:
+                    line = f.readline() # Read the lookup table line
+                    read_table = True
+                field_map[field] = 'scalar',f.tell() - read_table_offset
+                read_table=False
+
+            elif 'VECTORS' in splitup:
+                field = splitup[1]
+                vfield = field+'_x'
+                field_map[vfield] = 'vector',f.tell() - read_table_offset
+                vfield = field+'_y'
+                field_map[vfield] = 'vector',f.tell() - read_table_offset
+                vfield = field+'_z'
+                field_map[vfield] = 'vector',f.tell() - read_table_offset
+            del line
+            line = f.readline()
+
+        f.close()
+        del f
+
+        self.field_list = field_map.keys()
+        self._field_map = field_map
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = self.parameter_file.nvtk
+
+    def _parse_hierarchy(self):
+        f = open(self.hierarchy_filename,'rb')
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            del line
+            line = f.readline()
+        f.close()
+        del f
+
+        if na.prod(grid['dimensions']) != grid['ncells']:
+            grid['dimensions'] -= 1
+            grid['dimensions'][grid['dimensions']==0]=1
+        if na.prod(grid['dimensions']) != grid['ncells']:
+            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                  (na.prod(grid['dimensions']), grid['ncells']))
+            raise TypeError
+
+        dxs=[]
+        self.grids = na.empty(self.num_grids, dtype='object')
+        levels = na.zeros(self.num_grids, dtype='int32')
+        single_grid_width = grid['dds']*grid['dimensions']
+        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
+        glis = na.empty((self.num_grids,3), dtype='int64')
+        for i in range(self.num_grids):
+            procz = i/(grids_per_dim[0]*grids_per_dim[1])
+            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[1]
+            glis[i, 0] = procx*grid['dimensions'][0]
+            glis[i, 1] = procy*grid['dimensions'][1]
+            glis[i, 2] = procz*grid['dimensions'][2]
+        gdims = na.ones_like(glis)
+        gdims[:] = grid['dimensions']
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+            self.grids[i]._level_id = levels[i]
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dxs.append(grid['dds'])
+        dx = na.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = na.zeros([self.num_grids, 1], dtype='int64')
+        del levels, glis, gdims
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = na.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+class AthenaStaticOutput(StaticOutput):
+    _hierarchy_class = AthenaHierarchy
+    _fieldinfo_fallback = AthenaFieldInfo
+    _fieldinfo_known = KnownAthenaFields
+    _data_style = "athena"
+
+    def __init__(self, filename, data_style='athena',
+                 storage_filename = None, parameters = {}):
+        StaticOutput.__init__(self, filename, data_style)
+        self.filename = filename
+        self.storage_filename = filename[4:-4]
+        self.specified_parameters = parameters
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['cm'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+        # Here should read through and add fields.
+
+        #default_fields=['density']
+        # for field in self.field_list:
+        #     self.units[field] = 1.0
+        #     self._fieldinfo_known.add_field(field, function=NullFunc, take_log=False,
+        #             units="", projected_units="",
+        #             convert_function=None)
+
+        # This should be improved.
+        # self._handle = h5py.File(self.parameter_filename, "r")
+        # for field_name in self._handle["/field_types"]:
+        #     current_field = self._handle["/field_types/%s" % field_name]
+        #     try:
+        #         self.units[field_name] = current_field.attrs['field_to_cgs']
+        #     except:
+        #         self.units[field_name] = 1.0
+        #     try:
+        #         current_fields_unit = current_field.attrs['field_units'][0]
+        #     except:
+        #         current_fields_unit = ""
+        #     self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
+        #            units=current_fields_unit, projected_units="", 
+        #            convert_function=_get_convert(field_name))
+
+        # self._handle.close()
+        # del self._handle
+
+    def _parse_parameter_file(self):
+        self._handle = open(self.parameter_filename, "rb")
+        # Read the start of a grid to get simulation parameters.
+        grid = {}
+        grid['read_field'] = None
+        line = self._handle.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            del line
+            line = self._handle.readline()
+
+        self.domain_left_edge = grid['left_edge']
+        self.domain_right_edge = -grid['left_edge']
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self.domain_width/grid['dds']
+        refine_by = None
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by
+        self.dimensionality = 3
+        self.current_time = grid["time"]
+        self.unique_identifier = None
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+
+        self.nvtk = int(na.product(self.domain_dimensions/(grid['dimensions']-1)))
+
+        # if self.cosmological_simulation:
+        #     self.current_redshift = sp["current_redshift"]
+        #     self.omega_lambda = sp["omega_lambda"]
+        #     self.omega_matter = sp["omega_matter"]
+        #     self.hubble_constant = sp["hubble_constant"]
+        # else:
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+        del self._handle
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = file(args[0],'rb')
+            if "gridded_data_format" in fileh:
+                return True
+        except:
+            pass
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+
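
As a quick illustration of the header parsing above, here is a standalone
sketch of feeding parse_line a typical VTK preamble (the header values are
made up, and the import path assumes the module lands as shown in the diff;
note the deliberate 65**3-versus-64**3 mismatch, which the hierarchy code
corrects by trimming each dimension by one):

    from yt.frontends.athena.data_structures import parse_line

    grid = {'read_field': None, 'read_type': None}
    header = ["# vtk DataFile Version 3.0",
              "DIMENSIONS 65 65 65",
              "ORIGIN 0.0 0.0 0.0",
              "SPACING 0.015625 0.015625 0.015625",
              "CELL_DATA 262144",
              "SCALARS density float"]
    for line in header:
        parse_line(line, grid)
    # grid['dimensions'] -> [65 65 65]; grid['ncells'] -> 262144 (= 64**3)
    # grid['read_field'] -> 'density'; grid['read_type'] -> 'scalar'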


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/definitions.py
--- /dev/null
+++ b/yt/frontends/athena/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/fields.py
--- /dev/null
+++ b/yt/frontends/athena/fields.py
@@ -0,0 +1,91 @@
+"""
+Athena-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {"Density": "density",
+                        "Pressure": "pressure"}
+
+translation_dict = {"x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z"}
+                    
+# translation_dict = {"mag_field_x": "cell_centered_B_x ",
+#                     "mag_field_y": "cell_centered_B_y ",
+#                     "mag_field_z": "cell_centered_B_z "}
+
+AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = AthenaFieldInfo.add_field
+
+KnownAthenaFields = FieldInfoContainer()
+add_athena_field = KnownAthenaFields.add_field
+
+add_athena_field("density", function=NullFunc, take_log=True,
+          units=r"\rm{g}/\rm{cm}^3",
+          projected_units =r"\rm{g}/\rm{cm}^2")
+
+add_athena_field("specific_energy", function=NullFunc, take_log=True,
+          units=r"\rm{erg}/\rm{g}")
+
+add_athena_field("pressure", function=NullFunc, take_log=True,
+          units=r"\rm{erg}/\rm{g}")
+
+add_athena_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_athena_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_athena_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_athena_field("mag_field_x", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_athena_field("mag_field_y", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_athena_field("mag_field_z", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/io.py
--- /dev/null
+++ b/yt/frontends/athena/io.py
@@ -0,0 +1,116 @@
+"""
+The data-file handling functions
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.utilities.io_handler import \
+           BaseIOHandler
+import numpy as na
+
+class IOHandlerAthena(BaseIOHandler):
+    _data_style = "athena"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+    _read_table_offset = None
+
+    def _field_dict(self,fhandle):
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        return dict(zip(keys,val))
+
+    def _read_field_names(self,grid):
+        pass
+
+    def _read_data_set(self,grid,field):
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = na.prod(grid.ActiveDimensions)
+        grid_dims = grid.ActiveDimensions
+        line = f.readline()
+        while True:
+            splitup = line.strip().split()
+            if 'CELL_DATA' in splitup:
+                f.readline()
+                read_table_offset = f.tell()
+                del line
+                break
+            del line; line = f.readline()
+
+
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F').copy()
+        if dtype == 'vector':
+            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid_dims,order='F').copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid_dims,order='F').copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid_dims,order='F').copy()
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
+
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = na.prod(grid.ActiveDimensions)
+
+        line = f.readline()
+        while True:
+            splitup = line.strip().split()
+            if 'CELL_DATA' in splitup:
+                f.readline()
+                read_table_offset = f.tell()
+                del line
+                break
+            del line; line = f.readline()
+
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        if dtype == 'vector':
+            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
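
A side note on the vector reads above: legacy VTK interleaves components as
x0 y0 z0 x1 y1 z1 ..., so the [0::3]/[1::3]/[2::3] strides pull out the x, y,
and z planes.  A self-contained sketch with toy data (no real file involved):

    import numpy as na

    data = na.arange(12, dtype='>f4')   # 4 cells x 3 interleaved components
    print data[0::3]                    # x-components: [ 0.  3.  6.  9.]
    print data[1::3]                    # y-components: [ 1.  4.  7. 10.]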




diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/athena/setup.py
--- /dev/null
+++ b/yt/frontends/athena/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('athena', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('frontends', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("athena")
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -95,6 +95,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.athena.api import \
+    AthenaStaticOutput, AthenaFieldInfo, add_athena_field
+
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 


diff -r 514f5a6a60c893b2a05f929150f352b153d68c65 -r 37540bd65173d499ac8d6397496e716753af2145 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -42,6 +42,7 @@
         def __init__(cls, name, b, d):
             type.__init__(cls, name, b, d)
             if hasattr(cls, "_data_style"):
+                print 'Registering Class ', cls ,' with datastyle ', cls._data_style
                 io_registry[cls._data_style] = cls
 
     def __init__(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0128c2a41436/
changeset:   0128c2a41436
branch:      yt
user:        samskillman
date:        2012-08-31 22:48:44
summary:     yt load and load() now work.  Also works for serial runs.  Assumes all .vtk files are AthenaStaticOutput.
affected #:  2 files

diff -r 37540bd65173d499ac8d6397496e716753af2145 -r 0128c2a41436595a037b92ad572a2a539b0e77f3 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -54,10 +54,13 @@
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
         df = hierarchy.storage_filename
-        if id == 0:
-            gname = 'id0/' + df + '.vtk'
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
         else:
-            gname = 'id%i/' % id + df[:-5] + '-id%i'%id + df[-5:] + '.vtk'
+            if id == 0:
+                gname = 'id0/' + df + '.vtk'
+            else:
+                gname = 'id%i/' % id + df[:-5] + '-id%i'%id + df[-5:] + '.vtk'
         AMRGridPatch.__init__(self, id, filename = gname,
                               hierarchy = hierarchy)
         self.filename = gname
@@ -384,8 +387,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            fileh = file(args[0],'rb')
-            if "gridded_data_format" in fileh:
+            if 'vtk' in args[0]:
                 return True
         except:
             pass


diff -r 37540bd65173d499ac8d6397496e716753af2145 -r 0128c2a41436595a037b92ad572a2a539b0e77f3 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -42,7 +42,6 @@
         def __init__(cls, name, b, d):
             type.__init__(cls, name, b, d)
             if hasattr(cls, "_data_style"):
-                print 'Registering Class ', cls ,' with datastyle ', cls._data_style
                 io_registry[cls._data_style] = cls
 
     def __init__(self):
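
With this changeset the standard entry point works for Athena dumps; a short
sketch (the file name is hypothetical -- per the new _is_valid, any path
containing 'vtk' is treated as Athena data):

    from yt.mods import load

    pf = load("id0/athena.0010.vtk")
    pf.h.print_stats()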



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c80c81adee02/
changeset:   c80c81adee02
branch:      yt
user:        samskillman
date:        2012-09-03 19:59:56
summary:     Allow overriding of domain_right_edge in the parameters = {} dictionary, e.g. load('id0/athena.vtk', parameters={'domain_right_edge': [1.0, 1.0, 1.0]})
affected #:  1 file

diff -r 0128c2a41436595a037b92ad572a2a539b0e77f3 -r c80c81adee02a586cbda215b892e40ce65fa6bde yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -355,7 +355,11 @@
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
-        self.domain_right_edge = -grid['left_edge']
+        try:
+            self.domain_right_edge = na.array(self.specified_parameters['domain_right_edge'])
+        except:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self.domain_width/grid['dds']
         refine_by = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0470c0e4eecf/
changeset:   0470c0e4eecf
branch:      yt
user:        samskillman
date:        2012-09-04 19:00:19
summary:     na -> np; remove pdb import.
affected #:  2 files

diff -r c80c81adee02a586cbda215b892e40ce65fa6bde -r 0470c0e4eecff2bb942057822a6d2c1a6ee76ed0 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -43,7 +43,6 @@
 from .fields import AthenaFieldInfo, KnownAthenaFields
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
-import pdb
 
 def _get_convert(fname):
     def _conv(data):
@@ -80,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -97,11 +96,11 @@
         grid['level'] = int(splitup[6].rstrip(','))
         grid['domain'] = int(splitup[8].rstrip(','))
     elif "DIMENSIONS" in splitup:
-        grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
     elif "ORIGIN" in splitup:
-        grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
     elif "SPACING" in splitup:
-        grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
     elif "CELL_DATA" in splitup:
         grid["ncells"] = int(splitup[-1])
     elif "SCALARS" in splitup:
@@ -128,7 +127,7 @@
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
-        AMRHierarchy.__init__(self,pf,data_style)
+        AMRHierarchy.__init__(self, pf, data_style)
 
         self._fhandle.close()
 
@@ -142,18 +141,18 @@
         while line != '':
             splitup = line.strip().split()
             if "DIMENSIONS" in splitup:
-                grid_dims = na.array(splitup[-3:]).astype('int')
+                grid_dims = np.array(splitup[-3:]).astype('int')
                 line = f.readline()
                 continue
             elif "CELL_DATA" in splitup:
                 grid_ncells = int(splitup[-1])
                 line = f.readline()
-                if na.prod(grid_dims) != grid_ncells:
+                if np.prod(grid_dims) != grid_ncells:
                     grid_dims -= 1
                     grid_dims[grid_dims==0]=1
-                if na.prod(grid_dims) != grid_ncells:
+                if np.prod(grid_dims) != grid_ncells:
                     mylog.error('product of dimensions %i not equal to number of cells %i' % 
-                          (na.prod(grid_dims), grid_ncells))
+                          (np.prod(grid_dims), grid_ncells))
                     raise TypeError
                 break
             else:
@@ -218,20 +217,20 @@
         f.close()
         del f
 
-        if na.prod(grid['dimensions']) != grid['ncells']:
+        if np.prod(grid['dimensions']) != grid['ncells']:
             grid['dimensions'] -= 1
             grid['dimensions'][grid['dimensions']==0]=1
-        if na.prod(grid['dimensions']) != grid['ncells']:
+        if np.prod(grid['dimensions']) != grid['ncells']:
             mylog.error('product of dimensions %i not equal to number of cells %i' % 
-                  (na.prod(grid['dimensions']), grid['ncells']))
+                  (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
         dxs=[]
-        self.grids = na.empty(self.num_grids, dtype='object')
-        levels = na.zeros(self.num_grids, dtype='int32')
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
         single_grid_width = grid['dds']*grid['dimensions']
         grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
-        glis = na.empty((self.num_grids,3), dtype='int64')
+        glis = np.empty((self.num_grids,3), dtype='int64')
         for i in range(self.num_grids):
             procz = i/(grids_per_dim[0]*grids_per_dim[1])
             procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
@@ -239,7 +238,7 @@
             glis[i, 0] = procx*grid['dimensions'][0]
             glis[i, 1] = procy*grid['dimensions'][1]
             glis[i, 2] = procz*grid['dimensions'][2]
-        gdims = na.ones_like(glis)
+        gdims = np.ones_like(glis)
         gdims[:] = grid['dimensions']
         for i in range(levels.shape[0]):
             self.grids[i] = self.grid(i, self, levels[i],
@@ -251,11 +250,11 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx = dx/self.parameter_file.refine_by**(levels[i])
             dxs.append(grid['dds'])
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
-        self.grid_particle_count = na.zeros([self.num_grids, 1], dtype='int64')
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
         del levels, glis, gdims
 
     def _populate_grid_objects(self):
@@ -273,7 +272,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -356,7 +355,7 @@
 
         self.domain_left_edge = grid['left_edge']
         try:
-            self.domain_right_edge = na.array(self.specified_parameters['domain_right_edge'])
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
         except:
             mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
                     "if it is not equal to -domain_left_edge.")
@@ -373,7 +372,7 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(na.product(self.domain_dimensions/(grid['dimensions']-1)))
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
 
         # if self.cosmological_simulation:
         #     self.current_redshift = sp["current_redshift"]


diff -r c80c81adee02a586cbda215b892e40ce65fa6bde -r 0470c0e4eecff2bb942057822a6d2c1a6ee76ed0 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -27,7 +27,7 @@
 """
 from yt.utilities.io_handler import \
            BaseIOHandler
-import numpy as na
+import numpy as np
 
 class IOHandlerAthena(BaseIOHandler):
     _data_style = "athena"
@@ -46,7 +46,7 @@
     def _read_data_set(self,grid,field):
         f = file(grid.filename, 'rb')
         dtype, offset = grid.hierarchy._field_map[field]
-        grid_ncells = na.prod(grid.ActiveDimensions)
+        grid_ncells = np.prod(grid.ActiveDimensions)
         grid_dims = grid.ActiveDimensions
         line = f.readline()
         while True:
@@ -61,9 +61,9 @@
 
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F').copy()
+            data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F').copy()
         if dtype == 'vector':
-            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
                 data = data[0::3].reshape(grid_dims,order='F').copy()
             elif '_y' in field:
@@ -84,7 +84,7 @@
 
         f = file(grid.filename, 'rb')
         dtype, offset = grid.hierarchy._field_map[field]
-        grid_ncells = na.prod(grid.ActiveDimensions)
+        grid_ncells = np.prod(grid.ActiveDimensions)
 
         line = f.readline()
         while True:
@@ -98,9 +98,9 @@
 
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
         if dtype == 'vector':
-            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
                 data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
             elif '_y' in field:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c883c2104519/
changeset:   c883c2104519
branch:      yt
user:        samskillman
date:        2012-09-04 19:23:36
summary:     First pass on some of Matt's suggestions on the PR.
affected #:  1 file

diff -r 0470c0e4eecff2bb942057822a6d2c1a6ee76ed0 -r c883c21045190ad9e628d28542cfeb7460d4e581 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -57,9 +57,9 @@
             gname = hierarchy.parameter_file.filename
         else:
             if id == 0:
-                gname = 'id0/' + df + '.vtk'
+                gname = 'id0/%s.vtk' % df
             else:
-                gname = 'id%i/' % id + df[:-5] + '-id%i'%id + df[-5:] + '.vtk'
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
         AMRGridPatch.__init__(self, id, filename = gname,
                               hierarchy = hierarchy)
         self.filename = gname
@@ -143,7 +143,6 @@
             if "DIMENSIONS" in splitup:
                 grid_dims = np.array(splitup[-3:]).astype('int')
                 line = f.readline()
-                continue
             elif "CELL_DATA" in splitup:
                 grid_ncells = int(splitup[-1])
                 line = f.readline()
@@ -151,7 +150,7 @@
                     grid_dims -= 1
                     grid_dims[grid_dims==0]=1
                 if np.prod(grid_dims) != grid_ncells:
-                    mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
                           (np.prod(grid_dims), grid_ncells))
                     raise TypeError
                 break
@@ -168,17 +167,14 @@
                 if not read_table:
                     line = f.readline() # Read the lookup table line
                     read_table = True
-                field_map[field] = 'scalar',f.tell() - read_table_offset
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
                 read_table=False
 
             elif 'VECTORS' in splitup:
                 field = splitup[1]
-                vfield = field+'_x'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
-                vfield = field+'_y'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
-                vfield = field+'_z'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
             del line
             line = f.readline()
 
@@ -217,6 +213,8 @@
         f.close()
         del f
 
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
         if np.prod(grid['dimensions']) != grid['ncells']:
             grid['dimensions'] -= 1
             grid['dimensions'][grid['dimensions']==0]=1
@@ -244,7 +242,6 @@
             self.grids[i] = self.grid(i, self, levels[i],
                                       glis[i],
                                       gdims[i])
-            self.grids[i]._level_id = levels[i]
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
@@ -268,8 +265,8 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
         mask = np.zeros(self.num_grids, dtype='bool')
@@ -293,6 +290,7 @@
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
+        This is a stub for future development.  Currently sets arbitrary.
         """
         self.units = {}
         self.time_units = {}
@@ -307,34 +305,6 @@
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
 
-        # Here should read through and add fields.
-
-        #default_fields=['density']
-        # for field in self.field_list:
-        #     self.units[field] = 1.0
-        #     self._fieldinfo_known.add_field(field, function=NullFunc, take_log=False,
-        #             units="", projected_units="",
-        #             convert_function=None)
-
-        # This should be improved.
-        # self._handle = h5py.File(self.parameter_filename, "r")
-        # for field_name in self._handle["/field_types"]:
-        #     current_field = self._handle["/field_types/%s" % field_name]
-        #     try:
-        #         self.units[field_name] = current_field.attrs['field_to_cgs']
-        #     except:
-        #         self.units[field_name] = 1.0
-        #     try:
-        #         current_fields_unit = current_field.attrs['field_units'][0]
-        #     except:
-        #         current_fields_unit = ""
-        #     self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
-        #            units=current_fields_unit, projected_units="", 
-        #            convert_function=_get_convert(field_name))
-
-        # self._handle.close()
-        # del self._handle
-
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")
         # Read the start of a grid to get simulation parameters.
@@ -366,7 +336,7 @@
         self.refine_by = refine_by
         self.dimensionality = 3
         self.current_time = grid["time"]
-        self.unique_identifier = None
+        self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'
@@ -374,18 +344,11 @@
 
         self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
 
-        # if self.cosmological_simulation:
-        #     self.current_redshift = sp["current_redshift"]
-        #     self.omega_lambda = sp["omega_lambda"]
-        #     self.omega_matter = sp["omega_matter"]
-        #     self.hubble_constant = sp["hubble_constant"]
-        # else:
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
-        del self._handle
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
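
Distilled from the new VECTORS branch above, the loop now produces one
(kind, offset) entry per component.  A standalone sketch with stand-in
values ('momentum' and 1234 are not taken from a real file):

    field_map = {}
    field, offset = "momentum", 1234   # offset stands in for f.tell() - read_table_offset
    for ax in 'xyz':
        field_map["%s_%s" % (field, ax)] = ('vector', offset)
    print field_map["momentum_x"]      # ('vector', 1234)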



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3abd0449f7a4/
changeset:   3abd0449f7a4
branch:      yt
user:        samskillman
date:        2012-09-04 19:25:14
summary:     Fix for domain_right_edge.
affected #:  1 file

diff -r c883c21045190ad9e628d28542cfeb7460d4e581 -r 3abd0449f7a4add4a2a379aebe0647b47101a52e yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -329,6 +329,7 @@
         except:
             mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
                     "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self.domain_width/grid['dds']
         refine_by = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e2ba49fe07b7/
changeset:   e2ba49fe07b7
branch:      yt
user:        samskillman
date:        2012-09-04 20:03:17
summary:     More cleanup on aisle Athena.
affected #:  3 files

diff -r 3abd0449f7a4add4a2a379aebe0647b47101a52e -r e2ba49fe07b7680e7d536207a4a82853c8146eda yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -155,12 +155,10 @@
                     raise TypeError
                 break
             else:
-                del line
                 line = f.readline()
         read_table = False
         read_table_offset = f.tell()
         while line != '':
-            if len(line) == 0: break
             splitup = line.strip().split()
             if 'SCALARS' in splitup:
                 field = splitup[1]
@@ -175,11 +173,9 @@
                 for ax in 'xyz':
                     field_map["%s_%s" % (field, ax)] =\
                             ('vector', f.tell() - read_table_offset)
-            del line
             line = f.readline()
 
         f.close()
-        del f
 
         self.field_list = field_map.keys()
         self._field_map = field_map
@@ -208,10 +204,8 @@
             if 'TABLE' in line.strip().split():
                 break
             if len(line) == 0: break
-            del line
             line = f.readline()
         f.close()
-        del f
 
         # It seems some datasets have a mismatch between ncells and 
         # the actual grid dimensions.
@@ -252,7 +246,6 @@
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
         self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
-        del levels, glis, gdims
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -282,10 +275,10 @@
 
     def __init__(self, filename, data_style='athena',
                  storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
         self.storage_filename = filename[4:-4]
-        self.specified_parameters = parameters
 
     def _set_units(self):
         """
@@ -298,7 +291,6 @@
             self._parse_parameter_file()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['cm'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         for unit in mpc_conversion.keys():
             self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
@@ -320,13 +312,12 @@
             if 'TABLE' in line.strip().split():
                 break
             if len(line) == 0: break
-            del line
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
-        try:
+        if 'domain_right_edge' in self.specified_parameters:
             self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
-        except:
+        else:
             mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
                     "if it is not equal to -domain_left_edge.")
             self.domain_right_edge = -self.domain_left_edge


diff -r 3abd0449f7a4add4a2a379aebe0647b47101a52e -r e2ba49fe07b7680e7d536207a4a82853c8146eda yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -44,10 +44,10 @@
 translation_dict = {"x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
                     "z-velocity": "velocity_z"}
-                    
-# translation_dict = {"mag_field_x": "cell_centered_B_x ",
-#                     "mag_field_y": "cell_centered_B_y ",
-#                     "mag_field_z": "cell_centered_B_z "}
+
+translation_dict = {"mag_field_x": "cell_centered_B_x ",
+                    "mag_field_y": "cell_centered_B_y ",
+                    "mag_field_z": "cell_centered_B_z "}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -55,36 +55,33 @@
 KnownAthenaFields = FieldInfoContainer()
 add_athena_field = KnownAthenaFields.add_field
 
-add_athena_field("density", function=NullFunc, take_log=True,
-          units=r"\rm{g}/\rm{cm}^3",
-          projected_units =r"\rm{g}/\rm{cm}^2")
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units =r"")
 
-add_athena_field("specific_energy", function=NullFunc, take_log=True,
-          units=r"\rm{erg}/\rm{g}")
-
-add_athena_field("pressure", function=NullFunc, take_log=True,
-          units=r"\rm{erg}/\rm{g}")
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
 
 add_athena_field("velocity_x", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
 add_athena_field("velocity_y", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
 add_athena_field("velocity_z", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
-add_athena_field("mag_field_x", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
 
-add_athena_field("mag_field_y", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
 
-add_athena_field("mag_field_z", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
 
 for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=True)
+    add_field(f, TranslationFunc(v), take_log=False)
 
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)


diff -r 3abd0449f7a4add4a2a379aebe0647b47101a52e -r e2ba49fe07b7680e7d536207a4a82853c8146eda yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -48,20 +48,11 @@
         dtype, offset = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
         grid_dims = grid.ActiveDimensions
-        line = f.readline()
-        while True:
-            splitup = line.strip().split()
-            if 'CELL_DATA' in splitup:
-                f.readline()
-                read_table_offset = f.tell()
-                del line
-                break
-            del line; line = f.readline()
-
-
+        read_table_offset = get_read_table_offset(f)
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F').copy()
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
         if dtype == 'vector':
             data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
@@ -86,19 +77,11 @@
         dtype, offset = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
 
-        line = f.readline()
-        while True:
-            splitup = line.strip().split()
-            if 'CELL_DATA' in splitup:
-                f.readline()
-                read_table_offset = f.tell()
-                del line
-                break
-            del line; line = f.readline()
-
+        read_table_offset = get_read_table_offset(f)
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
         if dtype == 'vector':
             data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
@@ -107,10 +90,18 @@
                 data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
             elif '_z' in field:
                 data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
 
-        f.close()
-        if grid.pf.field_ordering == 1:
-            return data.T
-        else:
-            return data
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+    return read_table_offset
 
+
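
The contract of the newly factored-out helper, sketched with a hypothetical
file path: it scans forward to the line following CELL_DATA and returns that
byte position, to which the per-field offsets from _field_map are added.

    from yt.frontends.athena.io import get_read_table_offset

    f = open("id0/athena.0010.vtk", "rb")
    table_offset = get_read_table_offset(f)
    field_offset = 0                      # stand-in for an offset from _field_map
    f.seek(table_offset + field_offset)   # that field's data starts here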



https://bitbucket.org/yt_analysis/yt-3.0/changeset/87f9eddc4718/
changeset:   87f9eddc4718
branch:      yt
user:        samskillman
date:        2012-09-04 20:25:47
summary:     Some units modifications for Athena.
affected #:  1 file

diff -r e2ba49fe07b7680e7d536207a4a82853c8146eda -r 87f9eddc471822783826a87abd3714d0209c8ec9 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -283,19 +283,21 @@
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
-        This is a stub for future development.  Currently sets arbitrary.
         """
         self.units = {}
         self.time_units = {}
         if len(self.parameters) == 0:
             self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
-            self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")

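The change above gives unitless Athena data a consistent unit table: conversion factors default to 1.0 via a defaultdict, and length units are expressed relative to centimeters through the shared mpc_conversion table. A rough sketch of the pattern, with illustrative conversion values rather than yt's actual table:

from collections import defaultdict

# Illustrative stand-in for yt's mpc_conversion: units per megaparsec.
mpc_conversion = {'cm': 3.0857e24, 'km': 3.0857e19, 'pc': 1.0e6, 'mpc': 1.0}

conversion_factors = defaultdict(lambda: 1.0)  # everything defaults to 1.0
conversion_factors['Time'] = 1.0

units = {'1': 1.0}
for unit in mpc_conversion:
    # Code lengths are treated as centimeters for a unitless dataset.
    units[unit] = mpc_conversion[unit] / mpc_conversion['cm']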


https://bitbucket.org/yt_analysis/yt-3.0/changeset/28de3d40037a/
changeset:   28de3d40037a
branch:      yt
user:        samskillman
date:        2012-09-04 21:08:42
summary:     Fixing some Athena fields.  Set everything to linear space, not log, by default.
affected #:  1 file

diff -r 87f9eddc471822783826a87abd3714d0209c8ec9 -r 28de3d40037a7a938b026e828fa1999b3cd4605d yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -38,14 +38,14 @@
     TranslationFunc
 import yt.data_objects.universal_fields
 
-log_translation_dict = {"Density": "density",
-                        "Pressure": "pressure"}
+log_translation_dict = {}
 
-translation_dict = {"x-velocity": "velocity_x",
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z"}
-
-translation_dict = {"mag_field_x": "cell_centered_B_x ",
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x ",
                     "mag_field_y": "cell_centered_B_y ",
                     "mag_field_z": "cell_centered_B_z "}
 
@@ -81,7 +81,7 @@
           units=r"")
 
 for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=False)
+    add_field(f, TranslationFunc(v), take_log=True)
 
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)

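Emptying log_translation_dict makes every translated field linear by default; moving an entry back into it is all it takes to register that field as log-scaled again. A toy illustration of the two-dictionary registration pattern, where add_field and TranslationFunc are simplified stand-ins for the real yt machinery:

# Toy registry illustrating the pattern above; not yt's actual add_field.
registry = {}

def TranslationFunc(disk_name):
    # Produce a field function that reads the on-disk Athena name.
    return lambda data: data[disk_name]

def add_field(name, func, take_log):
    registry[name] = {'function': func, 'take_log': take_log}

log_translation_dict = {}                    # log-scaled aliases
translation_dict = {'Density': 'density'}    # linear aliases

for f, v in log_translation_dict.items():
    add_field(f, TranslationFunc(v), take_log=True)
for f, v in translation_dict.items():
    add_field(f, TranslationFunc(v), take_log=False)

assert registry['Density']['take_log'] is False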


https://bitbucket.org/yt_analysis/yt-3.0/changeset/74fd72ecdba4/
changeset:   74fd72ecdba4
branch:      yt
user:        samskillman
date:        2012-09-04 22:28:12
summary:     Adding John's fix for the conserved flag.
affected #:  1 file

diff -r 28de3d40037a7a938b026e828fa1999b3cd4605d -r 74fd72ecdba41721ab8dce549dee968772db47b9 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -91,7 +91,7 @@
         grid['vtk_version'] = splitup[-1]
     elif "Really" in splitup:
         grid['time'] = splitup[-1]
-    elif 'PRIMITIVE' in splitup:
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
         grid['time'] = float(splitup[4].rstrip(','))
         grid['level'] = int(splitup[6].rstrip(','))
         grid['domain'] = int(splitup[8].rstrip(','))

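Athena writes the second VTK header line as either 'PRIMITIVE vars ...' or 'CONSERVED vars ...' depending on the output mode, so matching either keyword lets both be parsed. A worked example of the fixed token indices, assuming a representative header line:

# Representative Athena VTK header line (values are illustrative).
line = 'CONSERVED vars at time= 3.000000e-01, level= 0, domain= 0'
splitup = line.strip().split()
# splitup: ['CONSERVED', 'vars', 'at', 'time=', '3.000000e-01,',
#           'level=', '0,', 'domain=', '0']
assert any(x in ['PRIMITIVE', 'CONSERVED'] for x in splitup)
time   = float(splitup[4].rstrip(','))   # 0.3
level  = int(splitup[6].rstrip(','))     # 0
domain = int(splitup[8].rstrip(','))     # 0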


https://bitbucket.org/yt_analysis/yt-3.0/changeset/aaae6f8d3286/
changeset:   aaae6f8d3286
branch:      yt
user:        samskillman
date:        2012-09-05 04:01:35
summary:     Wrong dimension in index calculation; parallel runs with non-trivial processor counts now work.  Fixes issue pointed out by John Zuhone.
affected #:  1 file

diff -r 74fd72ecdba41721ab8dce549dee968772db47b9 -r aaae6f8d3286bfa838c1c5b7c1d385c2eb777d3e yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -112,8 +112,6 @@
         grid['read_field'] = field
         grid['read_type'] = 'vector'
 
-
-
 class AthenaHierarchy(AMRHierarchy):
 
     grid = AthenaGrid
@@ -226,7 +224,7 @@
         for i in range(self.num_grids):
             procz = i/(grids_per_dim[0]*grids_per_dim[1])
             procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
-            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[1]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
             glis[i, 0] = procx*grid['dimensions'][0]
             glis[i, 1] = procy*grid['dimensions'][1]
             glis[i, 2] = procz*grid['dimensions'][2]

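The bug was in unwinding the flat grid index: after removing the z-plane contribution, the remainder is procy*grids_per_dim[0] + procx, so procx must subtract procy*grids_per_dim[0], not procy*grids_per_dim[1]. A round-trip check of the corrected decomposition, with illustrative grid counts:

import numpy as np

grids_per_dim = np.array([4, 3, 2])   # illustrative domain decomposition
nx, ny, nz = grids_per_dim

for i in range(nx * ny * nz):
    procz = i // (nx * ny)
    procy = (i - procz * nx * ny) // nx
    procx = i - procz * nx * ny - procy * nx   # the corrected term
    # Recompose the flat index to verify the decomposition is exact.
    assert i == procx + procy * nx + procz * nx * ny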


https://bitbucket.org/yt_analysis/yt-3.0/changeset/0c844cb7df71/
changeset:   0c844cb7df71
branch:      yt
user:        jzuhone
date:        2012-09-05 04:06:59
summary:     Merged in samskillman/yt (pull request #259)
affected #:  11 files



diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/api.py
--- /dev/null
+++ b/yt/frontends/athena/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.athena
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      AthenaGrid, \
+      AthenaHierarchy, \
+      AthenaStaticOutput
+
+from .fields import \
+      AthenaFieldInfo, \
+      KnownAthenaFields, \
+      add_athena_field
+
+from .io import \
+      IOHandlerAthena


diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena/data_structures.py
@@ -0,0 +1,356 @@
+"""
+Data structures for Athena.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as np
+import weakref
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.data_objects.hierarchy import \
+           AMRHierarchy
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import AthenaFieldInfo, KnownAthenaFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class AthenaGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        df = hierarchy.storage_filename
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
+        else:
+            if id == 0:
+                gname = 'id0/%s.vtk' % df
+            else:
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
+        AMRGridPatch.__init__(self, id, filename = gname,
+                              hierarchy = hierarchy)
+        self.filename = gname
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+def parse_line(line, grid):
+    # grid is a dictionary
+    splitup = line.strip().split()
+    if "vtk" in splitup:
+        grid['vtk_version'] = splitup[-1]
+    elif "Really" in splitup:
+        grid['time'] = splitup[-1]
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
+        grid['time'] = float(splitup[4].rstrip(','))
+        grid['level'] = int(splitup[6].rstrip(','))
+        grid['domain'] = int(splitup[8].rstrip(','))
+    elif "DIMENSIONS" in splitup:
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
+    elif "ORIGIN" in splitup:
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
+    elif "SPACING" in splitup:
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
+    elif "CELL_DATA" in splitup:
+        grid["ncells"] = int(splitup[-1])
+    elif "SCALARS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif "VECTORS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+
+class AthenaHierarchy(AMRHierarchy):
+
+    grid = AthenaGrid
+    _data_style='athena'
+    
+    def __init__(self, pf, data_style='athena'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.storage_filename = self.parameter_file.storage_filename
+        self.hierarchy_filename = self.parameter_file.filename
+        #self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = file(self.hierarchy_filename,'rb')
+        AMRHierarchy.__init__(self, pf, data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        field_map = {}
+        f = open(self.hierarchy_filename,'rb')
+        line = f.readline()
+        while line != '':
+            splitup = line.strip().split()
+            if "DIMENSIONS" in splitup:
+                grid_dims = np.array(splitup[-3:]).astype('int')
+                line = f.readline()
+            elif "CELL_DATA" in splitup:
+                grid_ncells = int(splitup[-1])
+                line = f.readline()
+                if np.prod(grid_dims) != grid_ncells:
+                    grid_dims -= 1
+                    grid_dims[grid_dims==0]=1
+                if np.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
+                          (np.prod(grid_dims), grid_ncells))
+                    raise TypeError
+                break
+            else:
+                line = f.readline()
+        read_table = False
+        read_table_offset = f.tell()
+        while line != '':
+            splitup = line.strip().split()
+            if 'SCALARS' in splitup:
+                field = splitup[1]
+                if not read_table:
+                    line = f.readline() # Read the lookup table line
+                    read_table = True
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                read_table=False
+
+            elif 'VECTORS' in splitup:
+                field = splitup[1]
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
+            line = f.readline()
+
+        f.close()
+
+        self.field_list = field_map.keys()
+        self._field_map = field_map
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = self.parameter_file.nvtk
+
+    def _parse_hierarchy(self):
+        f = open(self.hierarchy_filename,'rb')
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = f.readline()
+        f.close()
+
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            grid['dimensions'] -= 1
+            grid['dimensions'][grid['dimensions']==0]=1
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                  (np.prod(grid['dimensions']), grid['ncells']))
+            raise TypeError
+
+        dxs=[]
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
+        single_grid_width = grid['dds']*grid['dimensions']
+        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
+        glis = np.empty((self.num_grids,3), dtype='int64')
+        for i in range(self.num_grids):
+            procz = i/(grids_per_dim[0]*grids_per_dim[1])
+            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
+            glis[i, 0] = procx*grid['dimensions'][0]
+            glis[i, 1] = procy*grid['dimensions'][1]
+            glis[i, 2] = procz*grid['dimensions'][2]
+        gdims = np.ones_like(glis)
+        gdims[:] = grid['dimensions']
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dxs.append(grid['dds'])
+        dx = np.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+class AthenaStaticOutput(StaticOutput):
+    _hierarchy_class = AthenaHierarchy
+    _fieldinfo_fallback = AthenaFieldInfo
+    _fieldinfo_known = KnownAthenaFields
+    _data_style = "athena"
+
+    def __init__(self, filename, data_style='athena',
+                 storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
+        StaticOutput.__init__(self, filename, data_style)
+        self.filename = filename
+        self.storage_filename = filename[4:-4]
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _parse_parameter_file(self):
+        self._handle = open(self.parameter_filename, "rb")
+        # Read the start of a grid to get simulation parameters.
+        grid = {}
+        grid['read_field'] = None
+        line = self._handle.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = self._handle.readline()
+
+        self.domain_left_edge = grid['left_edge']
+        if 'domain_right_edge' in self.specified_parameters:
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
+        else:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self.domain_width/grid['dds']
+        refine_by = None
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by
+        self.dimensionality = 3
+        self.current_time = grid["time"]
+        self.unique_identifier = self._handle.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'vtk' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+

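The per-grid filenames in AthenaGrid.__init__ follow Athena's parallel output layout: rank 0 writes id0/<base>.<dump>.vtk and rank N writes idN/<base>-idN.<dump>.vtk. A worked example of the name construction, using a hypothetical dump called blast.0020:

# storage_filename is the parameter filename with 'id0/' and '.vtk'
# stripped, e.g. 'id0/blast.0020.vtk' -> 'blast.0020' (hypothetical name).
df = 'blast.0020'

def grid_name(grid_id):
    if grid_id == 0:
        return 'id0/%s.vtk' % df
    return 'id%i/%s-id%i%s.vtk' % (grid_id, df[:-5], grid_id, df[-5:])

assert grid_name(0) == 'id0/blast.0020.vtk'
assert grid_name(3) == 'id3/blast-id3.0020.vtk'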

diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/definitions.py
--- /dev/null
+++ b/yt/frontends/athena/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/fields.py
--- /dev/null
+++ b/yt/frontends/athena/fields.py
@@ -0,0 +1,88 @@
+"""
+Athena-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {}
+
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x ",
+                    "mag_field_y": "cell_centered_B_y ",
+                    "mag_field_z": "cell_centered_B_z "}
+
+AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = AthenaFieldInfo.add_field
+
+KnownAthenaFields = FieldInfoContainer()
+add_athena_field = KnownAthenaFields.add_field
+
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units =r"")
+
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+


diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/io.py
--- /dev/null
+++ b/yt/frontends/athena/io.py
@@ -0,0 +1,107 @@
+"""
+The data-file handling functions
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.utilities.io_handler import \
+           BaseIOHandler
+import numpy as np
+
+class IOHandlerAthena(BaseIOHandler):
+    _data_style = "athena"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+    _read_table_offset = None
+
+    def _field_dict(self,fhandle):
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        return dict(zip(keys,val))
+
+    def _read_field_names(self,grid):
+        pass
+
+    def _read_data_set(self,grid,field):
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+        grid_dims = grid.ActiveDimensions
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid_dims,order='F').copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid_dims,order='F').copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid_dims,order='F').copy()
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
+
+        f = file(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        if dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
+
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+    return read_table_offset
+
+

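Vector fields in the reader above are stored as interleaved (x, y, z) triplets, one triplet per cell, so a single component is recovered by striding the flat buffer before the Fortran-ordered reshape. A small demonstration with synthetic data:

import numpy as np

grid_dims = (2, 2, 2)                  # illustrative grid
ncells = np.prod(grid_dims)

# Interleaved buffer [x0, y0, z0, x1, y1, z1, ...] as Athena writes it.
flat = np.arange(3 * ncells, dtype='>f4')

vx = flat[0::3].reshape(grid_dims, order='F').copy()
vy = flat[1::3].reshape(grid_dims, order='F').copy()
vz = flat[2::3].reshape(grid_dims, order='F').copy()

assert vx.ravel(order='F')[0] == 0.0
assert vy.ravel(order='F')[0] == 1.0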



diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/athena/setup.py
--- /dev/null
+++ b/yt/frontends/athena/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('athena', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('frontends', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("athena")
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")


diff -r 3e35b1adb541c3f1412d3381d57bb5c0f83e3bf2 -r 0c844cb7df71949206ff497c57ef437d6429d58e yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -95,6 +95,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.athena.api import \
+    AthenaStaticOutput, AthenaFieldInfo, add_athena_field
+
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 

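With the frontend exported from yt.mods, an Athena VTK dump can be loaded directly; because the files do not record the domain's right edge, it can be supplied through the parameters dictionary. A minimal usage sketch (the filename and edge values are hypothetical):

from yt.mods import AthenaStaticOutput

# domain_right_edge is only needed when it is not the mirror image
# of domain_left_edge.
pf = AthenaStaticOutput('id0/blast.0020.vtk',
                        parameters={'domain_right_edge': [0.5, 0.5, 0.5]})
pf.h.print_stats()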




https://bitbucket.org/yt_analysis/yt-3.0/changeset/6e2b0aaf5fce/
changeset:   6e2b0aaf5fce
branch:      yt
user:        xarthisius
date:        2012-09-05 17:37:15
summary:     Remove unused bits from PlotCollection.save_book and write metadata to the PDF file. Partially fixes #228
affected #:  1 file

diff -r 0c844cb7df71949206ff497c57ef437d6429d58e -r 6e2b0aaf5fce04d5acb6b1fd0eac4a503fbed78f yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1533,7 +1533,7 @@
     @rootonly
     def save_book(self, filename, author = None, title = None, keywords = None,
                   subject = None, creator = None, producer = None,
-                  creation_data = None):
+                  creation_date = None):
         r"""Save a multipage PDF of all the current plots, rather than
         individual image files.
 
@@ -1580,15 +1580,21 @@
         >>> dd = pf.h.all_data()
         >>> pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
         ...                     weight = None)
-        >>> pc.save_book("my_plots.pdf", author="Matthew Turk", 
+        >>> pc.save_book("my_plots.pdf", author="Matthew Turk",
         ...              title="Fun plots")
         """
         from matplotlib.backends.backend_pdf import PdfPages
         outfile = PdfPages(filename)
         for plot in self.plots:
             plot.save_to_pdf(outfile)
-        if info is not None:
-            outfile._file.writeObject(outfile._file.infoObject, info)
+        pdf_keys = ['Title', 'Author', 'Subject', 'Keywords', 'Creator',
+            'Producer', 'CreationDate']
+        pdf_values = [title, author, subject, keywords, creator, producer,
+            creation_date]
+        metadata = outfile.infodict()
+        for key, val in zip(pdf_keys, pdf_values):
+            if isinstance(val, str):
+                metadata[key] = val
         outfile.close()
 
 def wrap_pylab_newplot(func):

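PdfPages exposes its document information dictionary through infodict(), which is what the rewritten save_book populates. A minimal sketch of the same metadata write outside of yt:

import matplotlib
matplotlib.use('Agg')   # headless backend for the sketch
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

outfile = PdfPages('my_plots.pdf')
fig = plt.figure()
plt.plot([0, 1], [0, 1])
outfile.savefig(fig)

metadata = outfile.infodict()
metadata['Title'] = 'Fun plots'
metadata['Author'] = 'Yours Truly'
outfile.close()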


https://bitbucket.org/yt_analysis/yt-3.0/changeset/1a3c927ef00c/
changeset:   1a3c927ef00c
branch:      yt
user:        xarthisius
date:        2012-09-05 20:00:50
summary:     Add 'proper' author to docstring
affected #:  1 file

diff -r 6e2b0aaf5fce04d5acb6b1fd0eac4a503fbed78f -r 1a3c927ef00cd6469864c6b8759c3574805ad092 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1580,7 +1580,7 @@
         >>> dd = pf.h.all_data()
         >>> pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
         ...                     weight = None)
-        >>> pc.save_book("my_plots.pdf", author="Matthew Turk",
+        >>> pc.save_book("my_plots.pdf", author="Yours Truly",
         ...              title="Fun plots")
         """
         from matplotlib.backends.backend_pdf import PdfPages



https://bitbucket.org/yt_analysis/yt-3.0/changeset/77b5e3d096f5/
changeset:   77b5e3d096f5
branch:      yt
user:        samskillman
date:        2012-09-04 20:52:47
summary:     Remove axes unit label if units are '1', 'u', or 'unitary'. x(1) and y(1) look strange.
affected #:  1 file

diff -r 37540bd65173d499ac8d6397496e716753af2145 -r 77b5e3d096f57a4b11738e5fc268643ba70d4c84 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -657,13 +657,18 @@
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
+            if not md['unit'] in ['1', 'u', 'unitary']:
+                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+            else:
+                axes_unit_label = ''
+
             if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
-                          r'\/\/('+md['unit'].encode('string-escape')+r')}$' for i in (0,1)]
+                        axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$',
-                          r'$\rm{Image\/y}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$']
-                
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 

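Dimensionless units now suppress the parenthesized unit entirely instead of rendering labels like 'x (1)'. The resulting label strings, traced for both cases in a simplified version of the plot_window logic:

def axis_label(name, unit):
    # Drop the unit annotation for the dimensionless cases.
    if unit not in ['1', 'u', 'unitary']:
        unit_part = r'\/\/(' + unit + ')'
    else:
        unit_part = ''
    return r'$\rm{' + name + unit_part + r'}$'

assert axis_label('x', 'kpc') == r'$\rm{x\/\/(kpc)}$'
assert axis_label('x', 'unitary') == r'$\rm{x}$'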


https://bitbucket.org/yt_analysis/yt-3.0/changeset/dd83c8b4e977/
changeset:   dd83c8b4e977
branch:      yt
user:        samskillman
date:        2012-09-05 18:16:56
summary:     Merging.
affected #:  4 files

diff -r 77b5e3d096f57a4b11738e5fc268643ba70d4c84 -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -43,7 +43,6 @@
 from .fields import AthenaFieldInfo, KnownAthenaFields
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
-import pdb
 
 def _get_convert(fname):
     def _conv(data):
@@ -54,10 +53,13 @@
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
         df = hierarchy.storage_filename
-        if id == 0:
-            gname = 'id0/' + df + '.vtk'
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
         else:
-            gname = 'id%i/' % id + df[:-5] + '-id%i'%id + df[-5:] + '.vtk'
+            if id == 0:
+                gname = 'id0/%s.vtk' % df
+            else:
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
         AMRGridPatch.__init__(self, id, filename = gname,
                               hierarchy = hierarchy)
         self.filename = gname
@@ -77,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -89,16 +91,16 @@
         grid['vtk_version'] = splitup[-1]
     elif "Really" in splitup:
         grid['time'] = splitup[-1]
-    elif 'PRIMITIVE' in splitup:
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
         grid['time'] = float(splitup[4].rstrip(','))
         grid['level'] = int(splitup[6].rstrip(','))
         grid['domain'] = int(splitup[8].rstrip(','))
     elif "DIMENSIONS" in splitup:
-        grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
     elif "ORIGIN" in splitup:
-        grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
     elif "SPACING" in splitup:
-        grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
     elif "CELL_DATA" in splitup:
         grid["ncells"] = int(splitup[-1])
     elif "SCALARS" in splitup:
@@ -110,8 +112,6 @@
         grid['read_field'] = field
         grid['read_type'] = 'vector'
 
-
-
 class AthenaHierarchy(AMRHierarchy):
 
     grid = AthenaGrid
@@ -125,7 +125,7 @@
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
-        AMRHierarchy.__init__(self,pf,data_style)
+        AMRHierarchy.__init__(self, pf, data_style)
 
         self._fhandle.close()
 
@@ -139,49 +139,41 @@
         while line != '':
             splitup = line.strip().split()
             if "DIMENSIONS" in splitup:
-                grid_dims = na.array(splitup[-3:]).astype('int')
+                grid_dims = np.array(splitup[-3:]).astype('int')
                 line = f.readline()
-                continue
             elif "CELL_DATA" in splitup:
                 grid_ncells = int(splitup[-1])
                 line = f.readline()
-                if na.prod(grid_dims) != grid_ncells:
+                if np.prod(grid_dims) != grid_ncells:
                     grid_dims -= 1
                     grid_dims[grid_dims==0]=1
-                if na.prod(grid_dims) != grid_ncells:
-                    mylog.error('product of dimensions %i not equal to number of cells %i' % 
-                          (na.prod(grid_dims), grid_ncells))
+                if np.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
+                          (np.prod(grid_dims), grid_ncells))
                     raise TypeError
                 break
             else:
-                del line
                 line = f.readline()
         read_table = False
         read_table_offset = f.tell()
         while line != '':
-            if len(line) == 0: break
             splitup = line.strip().split()
             if 'SCALARS' in splitup:
                 field = splitup[1]
                 if not read_table:
                     line = f.readline() # Read the lookup table line
                     read_table = True
-                field_map[field] = 'scalar',f.tell() - read_table_offset
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
                 read_table=False
 
             elif 'VECTORS' in splitup:
                 field = splitup[1]
-                vfield = field+'_x'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
-                vfield = field+'_y'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
-                vfield = field+'_z'
-                field_map[vfield] = 'vector',f.tell() - read_table_offset
-            del line
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
             line = f.readline()
 
         f.close()
-        del f
 
         self.field_list = field_map.keys()
         self._field_map = field_map
@@ -210,50 +202,48 @@
             if 'TABLE' in line.strip().split():
                 break
             if len(line) == 0: break
-            del line
             line = f.readline()
         f.close()
-        del f
 
-        if na.prod(grid['dimensions']) != grid['ncells']:
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
+        if np.prod(grid['dimensions']) != grid['ncells']:
             grid['dimensions'] -= 1
             grid['dimensions'][grid['dimensions']==0]=1
-        if na.prod(grid['dimensions']) != grid['ncells']:
+        if np.prod(grid['dimensions']) != grid['ncells']:
             mylog.error('product of dimensions %i not equal to number of cells %i' % 
-                  (na.prod(grid['dimensions']), grid['ncells']))
+                  (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
         dxs=[]
-        self.grids = na.empty(self.num_grids, dtype='object')
-        levels = na.zeros(self.num_grids, dtype='int32')
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
         single_grid_width = grid['dds']*grid['dimensions']
         grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
-        glis = na.empty((self.num_grids,3), dtype='int64')
+        glis = np.empty((self.num_grids,3), dtype='int64')
         for i in range(self.num_grids):
             procz = i/(grids_per_dim[0]*grids_per_dim[1])
             procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
-            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[1]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
             glis[i, 0] = procx*grid['dimensions'][0]
             glis[i, 1] = procy*grid['dimensions'][1]
             glis[i, 2] = procz*grid['dimensions'][2]
-        gdims = na.ones_like(glis)
+        gdims = np.ones_like(glis)
         gdims[:] = grid['dimensions']
         for i in range(levels.shape[0]):
             self.grids[i] = self.grid(i, self, levels[i],
                                       glis[i],
                                       gdims[i])
-            self.grids[i]._level_id = levels[i]
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx = dx/self.parameter_file.refine_by**(levels[i])
             dxs.append(grid['dds'])
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
-        self.grid_particle_count = na.zeros([self.num_grids, 1], dtype='int64')
-        del levels, glis, gdims
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -266,11 +256,11 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -283,10 +273,10 @@
 
     def __init__(self, filename, data_style='athena',
                  storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
         self.storage_filename = filename[4:-4]
-        self.specified_parameters = parameters
 
     def _set_units(self):
         """
@@ -296,42 +286,16 @@
         self.time_units = {}
         if len(self.parameters) == 0:
             self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
         self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['cm'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
-            self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-
-        # Here should read through and add fields.
-
-        #default_fields=['density']
-        # for field in self.field_list:
-        #     self.units[field] = 1.0
-        #     self._fieldinfo_known.add_field(field, function=NullFunc, take_log=False,
-        #             units="", projected_units="",
-        #             convert_function=None)
-
-        # This should be improved.
-        # self._handle = h5py.File(self.parameter_filename, "r")
-        # for field_name in self._handle["/field_types"]:
-        #     current_field = self._handle["/field_types/%s" % field_name]
-        #     try:
-        #         self.units[field_name] = current_field.attrs['field_to_cgs']
-        #     except:
-        #         self.units[field_name] = 1.0
-        #     try:
-        #         current_fields_unit = current_field.attrs['field_units'][0]
-        #     except:
-        #         current_fields_unit = ""
-        #     self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
-        #            units=current_fields_unit, projected_units="", 
-        #            convert_function=_get_convert(field_name))
-
-        # self._handle.close()
-        # del self._handle
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")
@@ -348,11 +312,15 @@
             if 'TABLE' in line.strip().split():
                 break
             if len(line) == 0: break
-            del line
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
-        self.domain_right_edge = -grid['left_edge']
+        if 'domain_right_edge' in self.specified_parameters:
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
+        else:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self.domain_width/grid['dds']
         refine_by = None
@@ -360,32 +328,24 @@
         self.refine_by = refine_by
         self.dimensionality = 3
         self.current_time = grid["time"]
-        self.unique_identifier = None
+        self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(na.product(self.domain_dimensions/(grid['dimensions']-1)))
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
 
-        # if self.cosmological_simulation:
-        #     self.current_redshift = sp["current_redshift"]
-        #     self.omega_lambda = sp["omega_lambda"]
-        #     self.omega_matter = sp["omega_matter"]
-        #     self.hubble_constant = sp["hubble_constant"]
-        # else:
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
-        del self._handle
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            fileh = file(args[0],'rb')
-            if "gridded_data_format" in fileh:
+            if 'vtk' in args[0]:
                 return True
         except:
             pass


diff -r 77b5e3d096f57a4b11738e5fc268643ba70d4c84 -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -38,16 +38,16 @@
     TranslationFunc
 import yt.data_objects.universal_fields
 
-log_translation_dict = {"Density": "density",
-                        "Pressure": "pressure"}
+log_translation_dict = {}
 
-translation_dict = {"x-velocity": "velocity_x",
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z"}
-                    
-# translation_dict = {"mag_field_x": "cell_centered_B_x ",
-#                     "mag_field_y": "cell_centered_B_y ",
-#                     "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x ",
+                    "mag_field_y": "cell_centered_B_y ",
+                    "mag_field_z": "cell_centered_B_z "}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -55,33 +55,30 @@
 KnownAthenaFields = FieldInfoContainer()
 add_athena_field = KnownAthenaFields.add_field
 
-add_athena_field("density", function=NullFunc, take_log=True,
-          units=r"\rm{g}/\rm{cm}^3",
-          projected_units =r"\rm{g}/\rm{cm}^2")
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units =r"")
 
-add_athena_field("specific_energy", function=NullFunc, take_log=True,
-          units=r"\rm{erg}/\rm{g}")
-
-add_athena_field("pressure", function=NullFunc, take_log=True,
-          units=r"\rm{erg}/\rm{g}")
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
 
 add_athena_field("velocity_x", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
 add_athena_field("velocity_y", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
 add_athena_field("velocity_z", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+          units=r"")
 
-add_athena_field("mag_field_x", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
 
-add_athena_field("mag_field_y", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
 
-add_athena_field("mag_field_z", function=NullFunc, take_log=False,
-          units=r"\rm{cm}/\rm{s}")
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
 
 for f,v in log_translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=True)


diff -r 77b5e3d096f57a4b11738e5fc268643ba70d4c84 -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -27,7 +27,7 @@
 """
 from yt.utilities.io_handler import \
            BaseIOHandler
-import numpy as na
+import numpy as np
 
 class IOHandlerAthena(BaseIOHandler):
     _data_style = "athena"
@@ -46,24 +46,15 @@
     def _read_data_set(self,grid,field):
         f = file(grid.filename, 'rb')
         dtype, offset = grid.hierarchy._field_map[field]
-        grid_ncells = na.prod(grid.ActiveDimensions)
+        grid_ncells = np.prod(grid.ActiveDimensions)
         grid_dims = grid.ActiveDimensions
-        line = f.readline()
-        while True:
-            splitup = line.strip().split()
-            if 'CELL_DATA' in splitup:
-                f.readline()
-                read_table_offset = f.tell()
-                del line
-                break
-            del line; line = f.readline()
-
-
+        read_table_offset = get_read_table_offset(f)
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F').copy()
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
         if dtype == 'vector':
-            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
                 data = data[0::3].reshape(grid_dims,order='F').copy()
             elif '_y' in field:
@@ -84,33 +75,33 @@
 
         f = file(grid.filename, 'rb')
         dtype, offset = grid.hierarchy._field_map[field]
-        grid_ncells = na.prod(grid.ActiveDimensions)
+        grid_ncells = np.prod(grid.ActiveDimensions)
 
-        line = f.readline()
-        while True:
-            splitup = line.strip().split()
-            if 'CELL_DATA' in splitup:
-                f.readline()
-                read_table_offset = f.tell()
-                del line
-                break
-            del line; line = f.readline()
-
+        read_table_offset = get_read_table_offset(f)
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
-            data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
         if dtype == 'vector':
-            data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
             if '_x' in field:
                 data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
             elif '_y' in field:
                 data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
             elif '_z' in field:
                 data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
 
-        f.close()
-        if grid.pf.field_ordering == 1:
-            return data.T
-        else:
-            return data
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+    return read_table_offset
 
+


diff -r 77b5e3d096f57a4b11738e5fc268643ba70d4c84 -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -42,7 +42,6 @@
         def __init__(cls, name, b, d):
             type.__init__(cls, name, b, d)
             if hasattr(cls, "_data_style"):
-                print 'Registering Class ', cls ,' with datastyle ', cls._data_style
                 io_registry[cls._data_style] = cls
 
     def __init__(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a1a294fb7205/
changeset:   a1a294fb7205
branch:      yt
user:        samskillman
date:        2012-09-05 18:18:21
summary:     Merging.
affected #:  22 files

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,7 +399,6 @@
 # Now we dump all our SHA512 files out.
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
@@ -599,11 +598,11 @@
     elif [ ! -e yt-hg ] 
     then
         YT_DIR="$PWD/yt-hg/"
-        ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
     elif [ -e yt-hg ] 
@@ -682,7 +681,12 @@
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
 cd $YT_DIR
-( ${HG_EXEC} pull && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+( ${HG_EXEC} pull 2>&1 && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
 
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -51,6 +51,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3077,7 +3077,7 @@
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
         self._d = -1.0 * na.dot(self._norm_vec, self.center)


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -220,7 +220,7 @@
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
+        if param in ['bulk_velocity', 'center', 'normal']:
             return na.random.random(3) * 1e-2
         else:
             return 0.0




diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -217,50 +217,181 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The spherical coordinates radius is simply the magnitude of the
+    ## coords vector.
+
+    return na.sqrt(na.sum(coords**2,axis=-1))
+
+def _Convert_sph_r_CGS(data):
+   return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The angle (theta) with respect to the normal (J), is the arccos
+    ## of the dot product of the normal with the normalized coords
+    ## vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JdotCoords = na.sum(J*coords,axis=-1)
+    
+    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+    
+    ## We have freedom with respect to what axis (xprime) to define
+    ## the disk angle. Here I've chosen to use the axis that is
+    ## perpendicular to the normal and the y-axis. When normal ==
+    ## y-hat, then set xprime = z-hat. With this definition, when
+    ## normal == z-hat (as is typical), then xprime == x-hat.
+    ##
+    ## The angle is then given by the arctan of the ratio of the
+    ## yprime-component and the xprime-component of the coords vector.
+
+    xprime = na.cross([0.0,1.0,0.0],normal)
+    if na.all(xprime == 0.0): xprime = na.array([0.0, 0.0, 1.0])
+    yprime = na.cross(normal,xprime)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = na.tile(xprime,tile_shape)
+    Jy = na.tile(yprime,tile_shape)
+    
+    Px = na.sum(Jx*coords,axis=-1)
+    Py = na.sum(Jy*coords,axis=-1)
+    
+    return na.arctan2(Py,Px)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+      
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The cross product of the normal (J) with the coords vector
+    ## gives a vector of magnitude equal to the cylindrical radius.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    JcrossCoords = na.cross(J,coords)
+    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+
+def _Convert_cyl_R_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = na.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The dot product of the normal (J) with the coords vector gives
+    ## the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = na.tile(normal,tile_shape)
+
+    return na.sum(J*coords,axis=-1)  
+
+def _Convert_cyl_z_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+### [This is identical to the spherical coordinate's 'phi' angle.]
+def _cyl_theta(field, data):
+    return data['sph_phi']
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / na.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return na.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = na.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/na.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = na.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return na.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
 
 def _DynamicalTime(field, data):
     """

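Taken together these fields give spherical (sph_r, sph_theta, sph_phi) and cylindrical (cyl_R, cyl_theta, cyl_z) coordinates about an arbitrary axis, with DiskAngle and Height kept as backwards-compatible aliases. A hedged usage sketch, with an illustrative dataset path and values; both the "center" and "normal" field parameters must be set on the data object:

    # Hypothetical usage of the new coordinate fields.
    from yt.mods import load
    import numpy as na

    pf = load("DD0010/moving7_0010")       # illustrative dataset path
    center = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    sp = pf.h.sphere(center, 0.1)          # radius in code units

    sp.set_field_parameter("center", center)
    sp.set_field_parameter("normal", na.array([0.0, 0.0, 1.0]))

    r     = sp["sph_r"]      # radii, converted to cm
    theta = sp["sph_theta"]  # angle from the normal, in radians
    z     = sp["cyl_z"]      # signed height above the plane through center

With normal == z-hat, xprime == x-hat by the convention above, so sph_phi (and hence cyl_theta) reduces to the usual azimuthal angle.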

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -441,11 +441,13 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if reconstruct:
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
 
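Reconstruction stays off by default; it runs only when the new reconstruct_hierarchy option is enabled, either in the [yt] section of the configuration file or from a script before the hierarchy is instantiated, using the same idiom the codebase uses for other options:

    # Enable guessed-from-parent grid properties before touching pf.h.
    from yt.config import ytcfg
    ytcfg["yt", "reconstruct_hierarchy"] = "True"

    from yt.mods import load
    pf = load("DD0010/moving7_0010")  # illustrative dataset path
    pf.h                              # the hierarchy build now reconstructs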


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -183,6 +183,12 @@
           display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
+def _TotalEnergy(field, data):
+    return data["Total_Energy"] / _convertEnergy(data)
+add_field("TotalEnergy", function=_TotalEnergy,
+          display_name = "\rm{Total}\/\rm{Energy}",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+
 def _NumberDensity(field, data):
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -39,7 +39,7 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
      ValidateDataField
@@ -229,13 +229,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -252,10 +252,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
@@ -265,7 +265,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -407,6 +408,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # convert to 'h'
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0

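The flipped exponent matters because FLASH stores comoving quantities: converting to proper cgs, velocities pick up a factor of (1+z)**-1, and comoving code lengths shrink by (1+z) relative to a proper Mpc, which is exactly what the units loop above now does. A worked check at a hypothetical redshift:

    # Worked check of the comoving conversions above; z is hypothetical.
    z = 2.0
    cm_per_mpc = 3.0856e24

    velx_code = 3.0e7                        # comoving velocity
    velx_cgs = velx_code * (1.0 + z)**-1.0   # proper cm/s: 1.0e7, not 9.0e7

    length_code = 1.0                        # one comoving Mpc
    length_cgs = length_code * cm_per_mpc / (1.0 + z)   # proper cm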

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -51,23 +51,6 @@
             count_list, conv_factors):
         pass
 
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return na.concatenate(tr)
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:




diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -8,7 +8,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.
   All Rights Reserved.
 
   This file is part of yt.
@@ -79,7 +79,7 @@
 class GDFHierarchy(AMRHierarchy):
 
     grid = GDFGrid
-    
+
     def __init__(self, pf, data_style='grid_data_format'):
         self.parameter_file = weakref.proxy(pf)
         self.data_style = data_style
@@ -96,7 +96,7 @@
 
     def _detect_fields(self):
         self.field_list = self._fhandle['field_types'].keys()
-    
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
@@ -104,14 +104,17 @@
 
     def _count_grids(self):
         self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
-       
+
     def _parse_hierarchy(self):
-        f = self._fhandle 
-        dxs=[]
+        f = self._fhandle
+        dxs = []
         self.grids = na.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
+        active_dims = ~((na.max(gdims, axis=0) == 1) &
+                        (self.parameter_file.domain_dimensions == 1))
+
         for i in range(levels.shape[0]):
             self.grids[i] = self.grid(i, self, levels[i],
                                       glis[i],
@@ -120,7 +123,7 @@
 
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
-            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
         dx = na.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
@@ -128,7 +131,7 @@
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
         self.grid_particle_count = f['grid_particle_count'][:]
         del levels, glis, gdims
- 
+
     def _populate_grid_objects(self):
         for g in self.grids:
             g._prepare_grid()
@@ -153,13 +156,13 @@
     _hierarchy_class = GDFHierarchy
     _fieldinfo_fallback = GDFFieldInfo
     _fieldinfo_known = KnownGDFFields
-    
+
     def __init__(self, filename, data_style='grid_data_format',
                  storage_filename = None):
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
         self.filename = filename
-        
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -190,12 +193,12 @@
             except:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
-                   units=current_fields_unit, projected_units="", 
+                   units=current_fields_unit, projected_units="",
                    convert_function=_get_convert(field_name))
 
         self._handle.close()
         del self._handle
-        
+
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
         sp = self._handle["/simulation_parameters"].attrs
@@ -204,7 +207,7 @@
         self.domain_dimensions = sp["domain_dimensions"][:]
         refine_by = sp["refine_by"]
         if refine_by is None: refine_by = 2
-        self.refine_by = refine_by 
+        self.refine_by = refine_by
         self.dimensionality = sp["dimensionality"]
         self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
@@ -225,7 +228,7 @@
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
         del self._handle
-            
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -238,4 +241,4 @@
 
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
-        
+

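The active_dims mask keeps dimensions that are flat everywhere (size 1 in every grid and in the domain) from being refined, so dx stays sensible for 1D and 2D GDF data. The masking in isolation, with hypothetical dimensions:

    # Standalone check of the active-dimension masking used above.
    import numpy as na

    domain_dimensions = na.array([64, 64, 1])     # hypothetical 2D dataset
    gdims = na.array([[16, 16, 1], [32, 32, 1]])  # per-grid dimensions
    active_dims = ~((na.max(gdims, axis=0) == 1) &
                    (domain_dimensions == 1))     # -> [True, True, False]

    dx = na.array([1.0/64, 1.0/64, 1.0])
    dx[active_dims] = dx[active_dims] / 2**1      # refine_by == 2, level 1
    # dx is now [1/128, 1/128, 1.0]; the flat z axis keeps its full extent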



diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -110,3 +110,10 @@
         return "You have not declared yourself to be inside the IPython " + \
                "Notebook.  Do so with this command:\n\n" + \
                "ytcfg['yt','ipython_notebook'] = 'True'"
+
+class YTUnitNotRecognized(YTException):
+    def __init__(self, unit):
+        self.unit = unit
+
+    def __str__(self):
+        return "This parameter file doesn't recognize %s" % self.unit


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,169 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    #g.attrs["num_ghost_zones"] = pf...
+    # @todo: Where is this in the yt API?
+    #g.attrs["field_ordering"] = pf...
+    # @todo: not yet supported by yt.
+    #g.attrs["boundary_conditions"] = pf...
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Do we need to loop over the grids for this?
+    f["grid_parent_id"] = -1
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()

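write_to_gdf needs only a loaded parameter file and a destination that does not exist yet; simulation parameters, per-field metadata, and per-grid data all land in a single HDF5 file. A hedged usage sketch (dataset path and attributes illustrative):

    # Hypothetical conversion of an existing dataset to GDF.
    from yt.mods import load
    from yt.utilities.grid_data_format.writer import write_to_gdf

    pf = load("DD0010/moving7_0010")   # illustrative dataset path
    write_to_gdf(pf, "moving7_0010.gdf",
                 data_author="J. Doe",             # optional provenance
                 data_comment="converted via write_to_gdf")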

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from .parallel_analysis_interface import ProcessorPool
+from contextlib import contextmanager
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass

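WorkSplitter is a template: setup() carves the MPI ranks into a controller plus two groups via ProcessorPool.from_sizes, and run() dispatches each rank to the hook matching its workgroup. Since the class never sets ABCMeta as its metaclass, the @abstractmethod decorators document intent rather than enforce it. A hypothetical subclass:

    # A hypothetical WorkSplitter subclass; each hook executes only on the
    # ranks of its own workgroup after the communicator is split.
    class StagedPipeline(WorkSplitter):
        def run_controller(self):
            pass  # e.g. route work items between the two groups

        def run_group1(self):
            pass  # e.g. producers staging data for group2

        def run_group2(self):
            pass  # e.g. consumers reducing what group1 staged

    # Launched under MPI with at least 1 + 2 + 4 ranks:
    # StagedPipeline.setup(ng1=2, ng2=4)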

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+import numpy as na
+from collections import defaultdict
+from contextlib import contextmanager
+from yt.funcs import mylog
+from yt.convenience import load
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = defaultdict(dict)
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return na.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except:# self.pf.hierarchy.io._read_exception as exc:
+            if fi.not_in_all:
+                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while 1:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = na.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = na.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+@contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+

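io_nodes is collective: every rank calls it, the communicator is split into an "io" pool and a "work" pool internally, and the result is broadcast back to all ranks, so the job needs at least n_io + n_work MPI processes. A hedged standalone driver mirroring the example above (dataset path illustrative):

    # Hypothetical driver; launch under MPI with >= 8 + 24 = 32 ranks, e.g.
    #   mpirun -np 32 python driver.py --parallel
    from yt.mods import *   # parses --parallel and sets up MPI
    from yt.utilities.parallel_tools.io_runner import io_nodes

    def total_mass(pf):
        dd = pf.h.all_data()
        return dd.quantities["TotalQuantity"]("CellMassMsun")

    # 8 ranks stage IO, 24 run the analysis; all ranks receive the result.
    q = io_nodes("DD0087/DD0087", 8, 24, total_mass)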

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:


diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -80,11 +80,11 @@
     def pixel_scale(self,plot):
         x0, x1 = plot.xlim
         xx0, xx1 = plot._axes.get_xlim()
-        dx = (xx0 - xx1)/(x1 - x0)
+        dx = (xx1 - xx0)/(x1 - x0)
         
         y0, y1 = plot.ylim
         yy0, yy1 = plot._axes.get_ylim()
-        dy = (yy0 - yy1)/(y1 - y0)
+        dy = (yy1 - yy0)/(y1 - y0)
 
         return (dx,dy)
 
@@ -295,30 +295,31 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
         """
-        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+        annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
-        *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        Grids must be wider than *min_pix_ids*; otherwise the ID will not be drawn.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
-        self.annotate = annotate # put grid numbers in the corner.
+        self.min_pix_ids = min_pix_ids
+        self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
-        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         xi = x_dict[plot.data.axis]
         yi = y_dict[plot.data.axis]
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
+        (dx, dy) = self.pixel_scale(plot)
+        (xpix, ypix) = plot.image._A.shape
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
@@ -331,29 +332,32 @@
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
+            left_edge_x = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_y = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+            if visible.nonzero()[0].size == 0: continue
             verts = na.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+                [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
+                 (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
-            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
-            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
-            if self.annotate:
-                ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_px)):
-                    plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
+            if self.draw_ids:
+                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
+                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                active_ids = na.unique(plot.data['GridIndices'])
+                for i in na.where(visible_ids)[0]:
+                    plot._axes.text(
+                        left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
+                        left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
+                        "%d" % active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):

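With pixel_scale corrected and visibility measured in pixels, grid edges land at the right axes coordinates, and IDs are drawn only for grids wider than min_pix_ids pixels. A hedged usage sketch of the renamed keyword (dataset path illustrative):

    # Hypothetical usage: outline grids, labeling only those wider than
    # 20 pixels with their IDs.
    from yt.mods import load, SlicePlot

    pf = load("DD0010/moving7_0010")
    p = SlicePlot(pf, "z", "Density")
    p.annotate_grids(alpha=0.5, draw_ids=True, min_pix_ids=20)
    p.save("grids_annotated.png")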

diff -r dd83c8b4e9774d4a6e7937fb52416d95f72e7522 -r a1a294fb7205fa2861e65527b09d815043c7d51e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -189,8 +189,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -354,25 +354,48 @@
 
         Parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed; for
+             example, (0.2, 0.3) requests a plot that has an x width of 0.2 and
+             a y width of 0.3 in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = width[0]
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
         centerx = (self.xlim[1] + self.xlim[0])/2 
         centery = (self.ylim[1] + self.ylim[0])/2 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
@@ -428,16 +451,17 @@
         pass
 
 class PWViewer(PlotWindow):
+    _unit = None
+    _colormaps = defaultdict(lambda: 'algae')
+    _callbacks = []
+    _field_transform = {}
     """A viewer for PlotWindows.
 
     """
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
                 self._field_transform[field] = log_transform
@@ -554,13 +578,50 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
+    @invalidate_plot
+    def set_axes_unit(self, unit_name):
+        r"""Set the unit for display on the x and y axes of the image.
+
+        Parameters
+        ----------
+        unit_name : string
+            A unit, available for conversion in the parameter file, that the
+            image extents will be displayed in.  If set to None, any previous
+            units will be reset.  If the unit is None, the default is chosen.
+
+        Raises
+        ------
+        YTUnitNotRecognized
+            If the unit is not known, this will be raised.
+
+        Examples
+        --------
+
+        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p.show()
+        >>> p.set_axes_unit("kpc")
+        >>> p.show()
+        >>> p.set_axes_unit(None)
+        >>> p.show()
+        """
+        # blind except because it could be in conversion_factors or units
+        try:
+            self.pf[unit_name]
+        except KeyError: 
+            if unit_name is not None:
+                raise YTUnitNotRecognized(unit_name)
+        self._unit = unit_name
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        unit = get_smallest_appropriate_unit(x_width, self.pf)
+        if self._unit is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+        else:
+            unit = self._unit
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -647,10 +708,12 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
@@ -672,10 +735,12 @@
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
+            field_name = self.data_source.pf.field_info[f].display_name
+            if field_name is None: field_name = f
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+f.encode('string-escape')+r'}$'
+                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
             else:
-                label = r'$\rm{'+f.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 
@@ -790,7 +855,8 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -810,11 +876,12 @@
              or the axis name itself
         fields : string
              The name of the field(s) to be plotted.
-        center : two or three-element vector of sequence floats, 'c', or 'center'
+        center : a two- or three-element sequence of floats, 'c', 'center', or 'max'
              The coordinate of the center of the image.  If left blank,
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
-             the middle of the domain.
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -834,6 +901,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
              in code units.  
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -855,9 +925,10 @@
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -878,11 +949,12 @@
              or the axis name itself
         fields : string
             The name of the field(s) to be plotted.
-        center : A two or three-element vector of sequence floats, 'c', or 'center'
-            The coordinate of the center of the image.  If left blanck,
-            the image centers on the location of the maximum density
-            cell.  If set to 'c' or 'center', the plot is centered on
-            the middle of the domain.
+        center : a two- or three-element sequence of floats, 'c', 'center', or 'max'
+             The coordinate of the center of the image.  If left blank,
+             the image centers on the location of the maximum density
+             cell.  If set to 'c' or 'center', the plot is centered on
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -902,6 +974,9 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
              in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -927,9 +1002,11 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -957,6 +1034,9 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -968,6 +1048,7 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>
@@ -1147,10 +1228,13 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
+        fsize, axrect, caxrect = self._get_best_layout(size)
         # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
+        
+        self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                               frameon = True)
+        self.axes = self.figure.add_axes(axrect)
+        self.cax = self.figure.add_axes(caxrect)
 
     def save(self, name, canvas = None):
         if name[-4:] == '.png':
@@ -1169,9 +1253,47 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
+        canvas.print_figure(fn)
         return fn
 
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
+        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
+        return newsize, axrect, caxrect
+
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
         f = cStringIO.StringIO()
@@ -1193,3 +1315,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+
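
A note on the layout change above: _get_best_layout reserves a fixed 0.7 in for
the colorbar plus fixed text margins, sizes the axes to keep the requested
aspect ratio, and shrinks everything uniformly if it overflows the figure.
A standalone sketch of the same arithmetic (the function name and the 8x8 in
size are hypothetical, not part of plot_window.py):

    def best_layout(size, cbar_inches=0.7):
        # size: (width, height) of the desired axes area, in inches
        aspect = 1.0 * size[0] / size[1]
        newsize = [size[0] + cbar_inches, size[1]]   # widen the figure for the colorbar
        text_buffx = 1.0 / newsize[0]                # ~1 in of left margin for labels
        text_bottomy = 0.7 / size[1]                 # 0.7 in below the axes
        text_topy = 0.3 / size[1]                    # 0.3 in of whitespace on top
        cbar_frac = cbar_inches / newsize[0]
        yfrac = 1.0 - text_bottomy - text_topy       # fix y first, then match the aspect
        ysize = yfrac * size[1]
        xsize = aspect * ysize
        xfrac = xsize / newsize[0]
        xbig = xfrac + text_buffx + 2.0 * cbar_frac  # total width fraction claimed
        ybig = yfrac + text_bottomy + text_topy
        if xbig > 1:                                 # shrink uniformly until it fits
            xsize /= xbig
            ysize /= xbig
        if ybig > 1:
            xsize /= ybig
            ysize /= ybig
        xfrac = xsize / newsize[0]
        yfrac = ysize / newsize[1]
        axrect = (text_buffx, text_bottomy, xfrac, yfrac)
        caxrect = (text_buffx + xfrac, text_bottomy, cbar_frac / 4.0, yfrac)
        return newsize, axrect, caxrect

    print(best_layout((8.0, 8.0)))  # rects are in figure-fraction coordinates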



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8a25dddac85f/
changeset:   8a25dddac85f
branch:      yt
user:        samskillman
date:        2012-09-05 18:32:57
summary:     Adding docstrings about the unit labels when set to 1, u, or unitary.
affected #:  1 file

diff -r a1a294fb7205fa2861e65527b09d815043c7d51e -r 8a25dddac85f759ba83671b1799d69ff0732a717 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -588,6 +588,8 @@
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
+            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
 
         Raises
         ------
@@ -904,6 +906,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -977,6 +981,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -1037,6 +1043,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
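
As a usage sketch of the new axes_unit keyword (the dataset path below is
hypothetical; 'unitary' suppresses the unit label as described above):

    from yt.mods import load, SlicePlot
    pf = load("DD0010/moving7_0010")                    # hypothetical dataset
    p = SlicePlot(pf, 'x', 'Density', axes_unit='kpc')  # tick labels in kpc
    p.set_axes_unit('unitary')                          # now only the axis names show
    p.save()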



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2825fd89deeb/
changeset:   2825fd89deeb
branch:      yt
user:        ngoldbaum
date:        2012-09-05 20:45:02
summary:     Merged in samskillman/yt (pull request #265)
affected #:  1 file

diff -r 1a3c927ef00cd6469864c6b8759c3574805ad092 -r 2825fd89deeba490c2d5dfc1c0200ed5493f0a1f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -588,6 +588,8 @@
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
+            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
 
         Raises
         ------
@@ -720,13 +722,18 @@
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
+            if not md['unit'] in ['1', 'u', 'unitary']:
+                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+            else:
+                axes_unit_label = ''
+
             if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
-                          r'\/\/('+md['unit'].encode('string-escape')+r')}$' for i in (0,1)]
+                        axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$',
-                          r'$\rm{Image\/y}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$']
-                
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
@@ -899,6 +906,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -972,6 +981,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -1032,6 +1043,8 @@
         axes_unit : A string
             The name of the unit for the tick labels on the x and y axes.  
             Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
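
The label logic in this change boils down to: build an optional '(unit)'
suffix, skip it for the unitless names, and wrap the axis name in math text.
In isolation (the unit value is hypothetical):

    unit = 'kpc'
    if unit not in ('1', 'u', 'unitary'):
        axes_unit_label = r'\/\/(' + unit + ')'  # LaTeX spacing plus the unit
    else:
        axes_unit_label = ''                     # unitless: only the axis name
    label = r'$\rm{x' + axes_unit_label + r'}$'
    print(label)                                 # $\rm{x\/\/(kpc)}$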



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e5d136902e9c/
changeset:   e5d136902e9c
branch:      yt
user:        samskillman
date:        2012-09-05 23:41:37
summary:     Quick bugfix
affected #:  1 file

diff -r 2825fd89deeba490c2d5dfc1c0200ed5493f0a1f -r e5d136902e9c0e72fa86f20a8c10fdb833667ad4 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -306,7 +306,7 @@
             nz = (self.profile[self._current_field] > 0)
             mi = self.profile[self._current_field][nz].min()
         else:
-            mi = self.profile[self._current_field][nz].min()
+            mi = self.profile[self._current_field].min()
         ma = self.profile[self._current_field].max()
         cbar.bounds = (mi, ma)
         cbar.cmap = 'algae'
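
The bug was a leftover reference to nz, which is only defined on the log-scale
branch; the linear branch must take the plain minimum. The intended pattern,
schematically (data values hypothetical):

    import numpy as np
    data = np.array([0.0, 1e-3, 2.5])
    take_log = True
    if take_log:
        nz = data > 0        # mask zeros so a log-scaled colorbar gets a positive minimum
        mi = data[nz].min()
    else:
        mi = data.min()      # linear scale: the plain minimum is fine
    ma = data.max()
    print((mi, ma))          # (0.001, 2.5)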



https://bitbucket.org/yt_analysis/yt-3.0/changeset/84404a462580/
changeset:   84404a462580
branch:      yt
user:        ngoldbaum
date:        2012-09-06 04:08:07
summary:     The callback list needs to be set in the constructor; otherwise, all
callbacks that have ever been created will be drawn for (say) the last
frame of an animation.
affected #:  1 file

diff -r e5d136902e9c0e72fa86f20a8c10fdb833667ad4 -r 84404a462580eb7281511bbf31edb7e44c0da68a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -453,7 +453,6 @@
 class PWViewer(PlotWindow):
     _unit = None
     _colormaps = defaultdict(lambda: 'algae')
-    _callbacks = []
     _field_transform = {}
     """A viewer for PlotWindows.
 
@@ -461,6 +460,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._callbacks = []
         self.setup_callbacks()
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
@@ -928,7 +928,7 @@
         axis = fix_axis(axis)
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
-        PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.viewer = PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
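
The underlying Python pitfall: a list assigned at class scope is shared by
every instance, so callbacks appended through one plot show up on all plots.
A minimal illustration (class names hypothetical):

    class Shared(object):
        callbacks = []               # class attribute: one list shared by all instances

    class PerInstance(object):
        def __init__(self):
            self.callbacks = []      # instance attribute: a fresh list per object

    a, b = Shared(), Shared()
    a.callbacks.append('annotate_time')
    print(b.callbacks)               # ['annotate_time'] -- b sees a's callback
    c, d = PerInstance(), PerInstance()
    c.callbacks.append('annotate_time')
    print(d.callbacks)               # [] -- isolated, as the fix intends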



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3c632bca4d4d/
changeset:   3c632bca4d4d
branch:      yt
user:        ngoldbaum
date:        2012-09-06 04:09:51
summary:     Removing the viewer member of SlicePlot; it wasn't necessary.
affected #:  1 file

diff -r 84404a462580eb7281511bbf31edb7e44c0da68a -r 3c632bca4d4d74357202497fc25d834dc97b5f93 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -928,7 +928,7 @@
         axis = fix_axis(axis)
         (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
-        self.viewer = PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0a36773eb6e9/
changeset:   0a36773eb6e9
branch:      yt
user:        ngoldbaum
date:        2012-09-06 17:13:08
summary:     field_transforms and unit should be instance attributes as well.
affected #:  1 file

diff -r 3c632bca4d4d74357202497fc25d834dc97b5f93 -r 0a36773eb6e9511e65c772bc2621dbe9a5d19f0b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -451,16 +451,16 @@
         pass
 
 class PWViewer(PlotWindow):
-    _unit = None
     _colormaps = defaultdict(lambda: 'algae')
-    _field_transform = {}
     """A viewer for PlotWindows.
 
     """
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._unit = None
         self._callbacks = []
+        self._field_transform = {}
         self.setup_callbacks()
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/551e1238ab38/
changeset:   551e1238ab38
branch:      yt
user:        ngoldbaum
date:        2012-09-06 18:27:59
summary:     Moving _colormaps as well.
affected #:  1 file

diff -r 0a36773eb6e9511e65c772bc2621dbe9a5d19f0b -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -451,7 +451,6 @@
         pass
 
 class PWViewer(PlotWindow):
-    _colormaps = defaultdict(lambda: 'algae')
     """A viewer for PlotWindows.
 
     """
@@ -461,6 +460,7 @@
         self._unit = None
         self._callbacks = []
         self._field_transform = {}
+        self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/45f9ac33caf2/
changeset:   45f9ac33caf2
branch:      yt
user:        bcrosby
date:        2012-09-05 22:44:10
summary:     Changes to the sorting of halos in mergertree.py to improve performance.
affected #:  1 file

diff -r 2825fd89deeba490c2d5dfc1c0200ed5493f0a1f -r 45f9ac33caf22fe5caca8cd255fdeaa7a67469b9 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -450,9 +450,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,19 +460,22 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(thisIDs.size,
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
+
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = na.array([], dtype='int32')
+                parent_masses = na.array([], dtype='int32')
+                parent_halos = na.array([], dtype='int32')
+            else:
+                parent_IDs = na.concatenate(parent_IDs)
+                parent_masses = na.concatenate(parent_masses)
+                parent_halos = na.concatenate(parent_halos)
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
@@ -482,30 +485,33 @@
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
-        for i,cname in enumerate(child_names):
+        child_IDs = []
+        child_masses = []
+        child_halos = []
+        for i,pname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
-                h5fp = h5py.File(cname)
+                h5fp = h5py.File(pname)
                 for group in h5fp:
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(thisIDs.size,
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
-        
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
+
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = na.array([], dtype='int32')
+            child_masses = na.array([], dtype='int32')
+            child_halos = na.array([], dtype='int32')
+        else:
+            child_IDs = na.concatenate(child_IDs)
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
         child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/16388c698349/
changeset:   16388c698349
branch:      yt
user:        bcrosby
date:        2012-09-05 23:06:50
summary:     Fixing the pname and cname typo; only for consistency, the code's behavior isn't affected.
affected #:  1 file

diff -r 45f9ac33caf22fe5caca8cd255fdeaa7a67469b9 -r 16388c69834985b99e28f1b68cac859e7ad7f31c yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -488,9 +488,9 @@
         child_IDs = []
         child_masses = []
         child_halos = []
-        for i,pname in enumerate(child_names):
+        for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
-                h5fp = h5py.File(pname)
+                h5fp = h5py.File(cname)
                 for group in h5fp:
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0e857c676504/
changeset:   0e857c676504
branch:      yt
user:        bcrosby
date:        2012-09-07 00:25:40
summary:     Data types are now consistent, and masses now use float rather than int
affected #:  1 file

diff -r 16388c69834985b99e28f1b68cac859e7ad7f31c -r 0e857c676504a42d04893c0812d0ffc86d1cd3c2 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -470,12 +470,12 @@
             # Sort the arrays by particle index in ascending order.
             if len(parent_IDs)==0:
                 parent_IDs = na.array([], dtype='int32')
-                parent_masses = na.array([], dtype='int32')
+                parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.concatenate(parent_IDs)
-                parent_masses = na.concatenate(parent_masses)
-                parent_halos = na.concatenate(parent_halos)
+                parent_IDs = na.concatenate(parent_IDs).astype('int32')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
@@ -505,12 +505,12 @@
         # Sort the arrays by particle index in ascending order.
         if len(child_IDs)==0:
             child_IDs = na.array([], dtype='int32')
-            child_masses = na.array([], dtype='int32')
+            child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.concatenate(child_IDs)
-            child_masses = na.concatenate(child_masses)
-            child_halos = na.concatenate(child_halos)
+            child_IDs = na.concatenate(child_IDs).astype('int32')
+            child_masses = na.concatenate(child_masses).astype('float64')
+            child_halos = na.concatenate(child_halos).astype('int32')
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
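
Why the mass dtype matters: ParticleMassMsun values are large floats, so an
int32 array truncates fractional masses and silently corrupts anything above
the int32 limit of about 2.1e9. A quick demonstration (values hypothetical):

    import numpy as np
    masses = np.array([3.7e6, 1.2e10])   # masses in Msun
    print(masses.astype('float64'))      # both values survive intact
    print(masses.astype('int32'))        # 3700000 survives; 1.2e10 overflows int32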



https://bitbucket.org/yt_analysis/yt-3.0/changeset/1a87d0db31fc/
changeset:   1a87d0db31fc
branch:      yt
user:        bcrosby
date:        2012-09-07 20:10:12
summary:     Switched from append() to extend() so that halos and their characteristics are added to an existing list as individual values rather than as nested lists
affected #:  1 file

diff -r 0e857c676504a42d04893c0812d0ffc86d1cd3c2 -r 1a87d0db31fc5d9d974d14b9f5d20e388959c5be yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -460,28 +460,32 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs.append(thisIDs)
-                        parent_masses.append(thisMasses)
-                        parent_halos.append(na.ones(thisIDs.size,
+                        parent_IDs.extend(thisIDs)
+                        parent_masses.extend(thisMasses)
+                        parent_halos.extend(na.ones(len(thisIDs),
                             dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-
             # Sort the arrays by particle index in ascending order.
             if len(parent_IDs)==0:
-                parent_IDs = na.array([], dtype='int32')
+                parent_IDs = na.array([], dtype='int64')
                 parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.concatenate(parent_IDs).astype('int32')
-                parent_masses = na.concatenate(parent_masses).astype('float64')
-                parent_halos = na.concatenate(parent_halos).astype('int32')
+                parent_IDs = na.asarray(parent_IDs).astype('int64')
+                parent_masses = na.asarray(parent_masses).astype('float64')
+                parent_halos = na.asarray(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
         parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
@@ -495,22 +499,27 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs.append(thisIDs)
-                    child_masses.append(thisMasses)
-                    child_halos.append(na.ones(thisIDs.size,
+                    child_IDs.extend(thisIDs)
+                    child_masses.extend(thisMasses)
+                    child_halos.extend(na.ones(len(thisIDs),
                         dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
-
         # Sort the arrays by particle index in ascending order.
         if len(child_IDs)==0:
-            child_IDs = na.array([], dtype='int32')
+            child_IDs = na.array([], dtype='int64')
             child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.concatenate(child_IDs).astype('int32')
-            child_masses = na.concatenate(child_masses).astype('float64')
-            child_halos = na.concatenate(child_halos).astype('int32')
+            child_IDs = na.asarray(child_IDs).astype('int64')
+            child_masses = na.asarray(child_masses)
+            child_halos = na.asarray(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
+
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.



https://bitbucket.org/yt_analysis/yt-3.0/changeset/211e42b5af28/
changeset:   211e42b5af28
branch:      yt
user:        sskory
date:        2012-09-07 21:23:36
summary:     This combination of append/concatenate works for me for the merger tree.
affected #:  1 file

diff -r 1a87d0db31fc5d9d974d14b9f5d20e388959c5be -r 211e42b5af287d7e811e8b8cc08f5f5a341a7b41 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -460,9 +460,9 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs.extend(thisIDs)
-                        parent_masses.extend(thisMasses)
-                        parent_halos.extend(na.ones(len(thisIDs),
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(len(thisIDs),
                             dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
@@ -472,9 +472,9 @@
                 parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.asarray(parent_IDs).astype('int64')
-                parent_masses = na.asarray(parent_masses).astype('float64')
-                parent_halos = na.asarray(parent_halos).astype('int32')
+                parent_IDs = na.concatenate(parent_IDs).astype('int64')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
                 sort = parent_IDs.argsort()
                 parent_IDs = parent_IDs[sort]
                 parent_masses = parent_masses[sort]
@@ -499,9 +499,9 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs.extend(thisIDs)
-                    child_masses.extend(thisMasses)
-                    child_halos.extend(na.ones(len(thisIDs),
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(len(thisIDs),
                         dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
@@ -511,9 +511,9 @@
             child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.asarray(child_IDs).astype('int64')
-            child_masses = na.asarray(child_masses)
-            child_halos = na.asarray(child_halos)
+            child_IDs = na.concatenate(child_IDs).astype('int64')
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
             sort = child_IDs.argsort()
             child_IDs = child_IDs[sort]
             child_masses = child_masses[sort]
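
The performance point behind this back-and-forth: growing a NumPy array with
repeated na.concatenate copies the whole array once per file, which is
quadratic overall, while collecting per-file chunks in a Python list and
concatenating once at the end is linear. Schematically (chunk contents
hypothetical):

    import numpy as np
    chunks = [np.array([9, 2, 5]), np.array([7, 1])]  # per-file particle indices
    ids = np.concatenate(chunks).astype('int64')      # a single copy at the end
    order = ids.argsort()                             # sort once; the same permutation
    ids = ids[order]                                  # reorders masses and halo IDs too
    print(ids)                                        # [1 2 5 7 9]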



https://bitbucket.org/yt_analysis/yt-3.0/changeset/62a260cea40d/
changeset:   62a260cea40d
branch:      yt
user:        sskory
date:        2012-09-07 22:29:10
summary:     Swapping out fortran kdtree for the cython one.
affected #:  1 file

diff -r 211e42b5af287d7e811e8b8cc08f5f5a341a7b41 -r 62a260cea40df2a9c2b9784f471852bcab2ee4f7 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
             child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = na.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
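
For reference, yt.utilities.spatial is a bundled copy of scipy.spatial whose
cKDTree adds the period keyword used above for periodic boundaries; the query
pattern otherwise mirrors stock SciPy. A minimal non-periodic sketch against
stock scipy.spatial:

    import numpy as np
    from scipy.spatial import cKDTree
    points = np.random.random((100, 3))     # normalized halo positions
    tree = cKDTree(points, leafsize=10)
    dist, idx = tree.query(points[0], k=5)  # five nearest neighbors of the first halo
    print(idx)                              # idx[0] is the query point itself (distance 0)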
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/80c20f67f390/
changeset:   80c20f67f390
branch:      yt
user:        bcrosby
date:        2012-09-08 18:37:48
summary:     Pulled in Stephen's modification. Tested against the standard halo merger tree database created for the Enzo_64 dataset and found identical results.
affected #:  1 file

diff -r 62a260cea40df2a9c2b9784f471852bcab2ee4f7 -r 80c20f67f390ea2eadf9358d7ccba79f2fabb2a6 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -506,7 +506,7 @@
             child_masses = child_masses[sort]
             child_halos = child_halos[sort]
             del sort
-
+        
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5e78fb4e3812/
changeset:   5e78fb4e3812
branch:      yt
user:        sskory
date:        2012-09-08 19:19:38
summary:     Merged in bcrosby/crosby (pull request #266)
affected #:  1 file

diff -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 -r 5e78fb4e3812e186e208866d9b7300a244eb6ee5 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
             child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = na.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = na.array([], dtype='int64')
+                parent_masses = na.array([], dtype='float64')
+                parent_halos = na.array([], dtype='int32')
+            else:
+                parent_IDs = na.concatenate(parent_IDs).astype('int64')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
         parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = na.array([], dtype='int64')
+            child_masses = na.array([], dtype='float64')
+            child_halos = na.array([], dtype='int32')
+        else:
+            child_IDs = na.concatenate(child_IDs).astype('int64')
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
         child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/67b63e227903/
changeset:   67b63e227903
branch:      yt
user:        MatthewTurk
date:        2012-09-08 22:52:31
summary:     [flash] Only update dx for those dimensions where the dx varies.
affected #:  1 file

diff -r 5e78fb4e3812e186e208866d9b7300a244eb6ee5 -r 67b63e227903587f1aae55b5c6250f081d3afc32 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -136,12 +136,12 @@
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = na.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
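
ND (defined elsewhere in this method, presumably the dataset dimensionality)
limits the update, and because dxs is now filled with na.ones the unused
dimensions of a 1D or 2D FLASH dataset keep dx = 1.0 at every level instead of
shrinking. A worked example, assuming refine_by = 2 (cell widths hypothetical):

    import numpy as np
    ND = 2                               # e.g. a 2D FLASH dataset
    rdx = np.array([0.5, 0.25, 1.0])     # root-grid cell widths
    nlevels = 2
    dxs = np.ones((nlevels + 1, 3))
    for i in range(nlevels + 1):
        dxs[i, :ND] = rdx[:ND] / 2**i    # refine only the active dimensions
    print(dxs)                           # the third column stays 1.0 on every level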



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c90637c3128c/
changeset:   c90637c3128c
branch:      yt
user:        ngoldbaum
date:        2012-09-07 21:00:01
summary:     Adding a callback that annotates a plot with the current time.
affected #:  1 file

diff -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 -r c90637c3128c0d5b6e4c846b3d5f601f5829074a yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -456,6 +456,27 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
+class TimeCallback(PlotCallback):
+    _type_name = "time"
+    def __init__(self):
+        """
+        This annotates the plot with the current simulation time.
+        For now, the time is displayed in seconds.
+        """
+        PlotCallback.__init__(self)
+    
+    def __call__(self, plot):
+        current_time = plot.pf.current_time/plot.pf['Time']
+        timestring = format(current_time,'10.7e')
+        base = timestring[:timestring.find('e')]
+        exponent = timestring[timestring.find('e')+1:]
+        if exponent[0] == '+':
+            exponent = exponent[1:]
+        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
+        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
+        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
+        plot._axes.add_artist(at)
+
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None



https://bitbucket.org/yt_analysis/yt-3.0/changeset/60555858b71a/
changeset:   60555858b71a
branch:      yt
user:        ngoldbaum
date:        2012-09-07 21:05:06
summary:     Passing in the format code as a kwarg.
affected #:  1 file

diff -r c90637c3128c0d5b6e4c846b3d5f601f5829074a -r 60555858b71a86a118fe6a31fadfcd8cc776e87b yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -458,16 +458,18 @@
 
 class TimeCallback(PlotCallback):
     _type_name = "time"
-    def __init__(self):
+    def __init__(self, format_code='10.7e'):
         """
         This annotates the plot with the current simulation time.
         For now, the time is displayed in seconds.
+        *format_code* can be optionally set, allowing a custom 
+        c-style format code for the time display.
         """
         PlotCallback.__init__(self)
     
     def __call__(self, plot):
         current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,'10.7e')
+        timestring = format(current_time,format_code)
         base = timestring[:timestring.find('e')]
         exponent = timestring[timestring.find('e')+1:]
         if exponent[0] == '+':



https://bitbucket.org/yt_analysis/yt-3.0/changeset/821990647710/
changeset:   821990647710
branch:      yt
user:        ngoldbaum
date:        2012-09-07 21:15:03
summary:     Properly handling format_code.
affected #:  1 file

diff -r 60555858b71a86a118fe6a31fadfcd8cc776e87b -r 82199064771081fe61b58de835e150916e692860 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -465,11 +465,12 @@
         *format_code* can be optionally set, allowing a custom 
         c-style format code for the time display.
         """
+        self.format_code = format_code
         PlotCallback.__init__(self)
     
     def __call__(self, plot):
         current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,format_code)
+        timestring = format(current_time,self.format_code)
         base = timestring[:timestring.find('e')]
         exponent = timestring[timestring.find('e')+1:]
         if exponent[0] == '+':



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5b0682498d83/
changeset:   5b0682498d83
branch:      yt
user:        MatthewTurk
date:        2012-09-10 17:23:49
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #268)
affected #:  1 file

diff -r 5e78fb4e3812e186e208866d9b7300a244eb6ee5 -r 5b0682498d834a77ec3779928f7d2d2611554c7f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -456,6 +456,30 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
+class TimeCallback(PlotCallback):
+    _type_name = "time"
+    def __init__(self, format_code='10.7e'):
+        """
+        This annotates the plot with the current simulation time.
+        For now, the time is displayed in seconds.
+        *format_code* can be optionally set, allowing a custom 
+        c-style format code for the time display.
+        """
+        self.format_code = format_code
+        PlotCallback.__init__(self)
+    
+    def __call__(self, plot):
+        current_time = plot.pf.current_time/plot.pf['Time']
+        timestring = format(current_time,self.format_code)
+        base = timestring[:timestring.find('e')]
+        exponent = timestring[timestring.find('e')+1:]
+        if exponent[0] == '+':
+            exponent = exponent[1:]
+        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
+        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
+        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
+        plot._axes.add_artist(at)
+
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
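
The string handling in TimeCallback just splits Python's scientific notation
into mantissa and exponent so the time can be typeset as t = m x 10^e. In
isolation (the time value is hypothetical):

    current_time = 3.1557e13                    # seconds
    timestring = format(current_time, '10.7e')  # '3.1557000e+13'
    base, exponent = timestring.split('e')
    exponent = exponent.lstrip('+')             # keep a leading '-', drop a '+'
    label = r'$t\/=\/' + base + r'\times\,10^{' + exponent + r'}\, \rm{s}$'
    print(label)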



https://bitbucket.org/yt_analysis/yt-3.0/changeset/971c4d171969/
changeset:   971c4d171969
branch:      yt
user:        xarthisius
date:        2012-09-05 12:48:46
summary:     Add module for Cartesian domain decomposition
affected #:  1 file

diff -r 0c844cb7df71949206ff497c57ef437d6429d58e -r 971c4d171969e388fdf4f10eb288ae9b67023e3e yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,157 @@
+"""
+Automagical Cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    quality = load_balance / (1 + 0.25 * (bsize / ideal_bsize - 1.0))
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consisting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    print fac, pieces
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape
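
To make the search space concrete: factorize_number returns one row per prime
factor as (prime, power, number of ways to split that power across three
axes), so pieces = 12 = 2**2 * 3 yields 6 * 3 = 18 candidate (px, py, pz)
grids for evaluate_domain_decomposition to score. A quick check (output
illustrative; note that get_psize still carries a stray debug print):

    import numpy as np
    from yt.utilities.decompose import factorize_number, get_psize
    print(factorize_number(12))   # [[2 2 6]
                                  #  [3 1 3]]
    print(get_psize(np.array([128, 128, 129]), 12))  # e.g. [2 2 3]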



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f4e3776f28ab/
changeset:   f4e3776f28ab
branch:      yt
user:        xarthisius
date:        2012-09-05 12:49:41
summary:     stream::load_uniform_grid can handle arrays of any size and split them into smaller chunks for parallelization
affected #:  1 file

diff -r 971c4d171969e388fdf4f10eb288ae9b67023e3e -r f4e3776f28ab22fa25c426be566555a59b57bc4d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -296,13 +297,13 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, bbox, bbox_to_cm=1.0,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
     This should allow a uniform grid of data to be loaded directly into yt and
-    analyzed as would any others.  This comes with several caveats:
+    nalyzed as would any others.  This comes with several caveats:
         * Units will be incorrect unless the data has already been converted to
           cgs.
         * Some functions may behave oddly, and parallelism will be
@@ -313,55 +314,74 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    bbox : array_like (xdim:zdim, LE:RE)
+        Size of computational domain in centimeters
+    bbox_to_cm : float, optional
+        Conversion factor from bbox units to centimeters
+    nprocs: integer, optional
+        If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = na.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, bbox, nprocs=12)
+
     """
+
+    domain_dimensions = na.array(domain_dimensions)
+    domain_left_edge = na.array(bbox[:, 0], 'float64')
+    domain_right_edge = na.array(bbox[:, 1], 'float64')
+    grid_levels = na.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(na.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+        grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+        grid_left_edges  = grid_left_edges.astype("float64")
+        grid_left_edges /= domain_dimensions*2**grid_levels
+        grid_left_edges *= domain_right_edge - domain_left_edge
+        grid_left_edges += domain_left_edge
+
+        grid_right_edges  = grid_right_edges.astype("float64")
+        grid_right_edges /= domain_dimensions*2**grid_levels
+        grid_right_edges *= domain_right_edge - domain_left_edge
+        grid_right_edges += domain_left_edge
+
     grid_dimensions = grid_right_edges - grid_left_edges
 
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
-
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        number_of_particles*na.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        na.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +395,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = bbox_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = bbox_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
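
A hedged usage sketch of the decomposition path this changeset wires up, using only the two utilities it imports (data is synthetic, and the code assumes the Python 2 / numpy environment of the time):

    import numpy as np
    from yt.utilities.decompose import decompose_array, get_psize

    arr = np.random.random((128, 128, 129))        # synthetic Density field
    bbox = np.array([[0.0, 1.0], [-1.5, 1.5], [1.0, 2.5]])

    psize = get_psize(np.array(arr.shape), 12)     # e.g. [2 2 3]
    LE, RE, chunks = decompose_array(arr, psize, bbox)

    # Every cell lands in exactly one chunk; edges come back in bbox units.
    assert sum(c.size for c in chunks) == arr.size
    print(LE[0], RE[0], chunks[0].shape)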



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d00b3ecbda4f/
changeset:   d00b3ecbda4f
branch:      yt
user:        xarthisius
date:        2012-09-05 13:20:42
summary:     Fix grid_dimensions and parent_ids
affected #:  1 file

diff -r f4e3776f28ab22fa25c426be566555a59b57bc4d -r d00b3ecbda4f13448a7a729e816451771c391429 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -332,7 +332,7 @@
 
     >>> arr = na.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> bbox = na.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
     >>> pf = load_uniform_grid(data, arr.shape, bbox, nprocs=12)
 
     """
@@ -351,6 +351,7 @@
             psize = get_psize(na.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
+            grid_dimensions = na.array([grid.shape for grid in temp[key]])
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -371,15 +372,15 @@
         grid_right_edges /= domain_dimensions*2**grid_levels
         grid_right_edges *= domain_right_edge - domain_left_edge
         grid_right_edges += domain_left_edge
+        grid_dimensions = grid_right_edges - grid_left_edges
 
-    grid_dimensions = grid_right_edges - grid_left_edges
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
+        -na.ones(nprocs, dtype='int64'),
         number_of_particles*na.ones(nprocs, dtype='int64').reshape(nprocs,1),
         na.zeros(nprocs).reshape((nprocs,1)),
         sfh,
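
The point of the fix: once the edges are expressed in bbox units, grid_right_edges - grid_left_edges gives physical widths rather than cell counts, so the per-grid dimensions have to come from the subarray shapes themselves. A minimal sketch (chunk shapes synthetic):

    import numpy as np

    chunks = [np.empty((64, 64, 43)) for _ in range(12)]   # stand-in subarrays
    grid_dimensions = np.array([g.shape for g in chunks])  # cell counts per grid
    parent_ids = -np.ones(12, dtype='int64')               # flat grids: no parents
    print(grid_dimensions[0], parent_ids[:3])              # [64 64 43] [-1 -1 -1]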



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a8600ea68f49/
changeset:   a8600ea68f49
branch:      yt
user:        xarthisius
date:        2012-09-05 14:28:01
summary:     [gdf:writer] grid_parent_id should be an array, even if filled with a dummy number for now
affected #:  1 file

diff -r d00b3ecbda4f13448a7a729e816451771c391429 -r a8600ea68f497baf16074f05caaea4bff11f8969 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -139,7 +139,7 @@
     f["grid_left_index"] = pf.h.grid_left_edge
     f["grid_level"] = pf.h.grid_levels
     # @todo: Do we need to loop over the grids for this?
-    f["grid_parent_id"] = -1
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.h.grid_particle_count
 
     ###
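
Assigning a bare -1 through h5py creates a scalar (0-d) dataset, while a reader that indexes per grid expects one entry per grid, hence the length-n array. A minimal h5py sketch (file name hypothetical):

    import h5py
    import numpy as np

    n_grids = 12
    with h5py.File("gdf_sketch.h5", "w") as f:
        # One dummy parent id per grid, matching the first axis of
        # grid_dimensions; -1 marks "no parent" on a flat hierarchy.
        f["grid_parent_id"] = -np.ones(n_grids, dtype='int64')
        assert f["grid_parent_id"].shape == (n_grids,)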



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2cb1493b5aa4/
changeset:   2cb1493b5aa4
branch:      yt
user:        xarthisius
date:        2012-09-05 15:03:46
summary:     remove debug print
affected #:  1 file

diff -r a8600ea68f497baf16074f05caaea4bff11f8969 -r 2cb1493b5aa44adf5e69f9cdb48b607183558f89 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -104,7 +104,6 @@
         to minimize the amount of inter-process communication.
     """
     fac = factorize_number(pieces)
-    print fac, pieces
     nfactors = len(fac[:, 2])
     best = 0.0
     while np.all(fac[:, 2] > 0):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/40139dff7b92/
changeset:   40139dff7b92
branch:      yt
user:        xarthisius
date:        2012-09-05 15:05:50
summary:     [gdf:writer] add dummy values to fields required by the yt reader; fix grid_left_index, which was erroneously assigned grid_left_edge
affected #:  1 file

diff -r 2cb1493b5aa44adf5e69f9cdb48b607183558f89 -r 40139dff7b92fa81cb7fce008618810395f8c0cf yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -83,11 +83,11 @@
     g.attrs["unique_identifier"] = pf.unique_identifier
     g.attrs["cosmological_simulation"] = pf.cosmological_simulation
     # @todo: Where is this in the yt API?
-    #g.attrs["num_ghost_zones"] = pf...
+    g.attrs["num_ghost_zones"] = 0
     # @todo: Where is this in the yt API?
-    #g.attrs["field_ordering"] = pf...
+    g.attrs["field_ordering"] = 0
     # @todo: not yet supported by yt.
-    #g.attrs["boundary_conditions"] = pf...
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
 
     if pf.cosmological_simulation:
         g.attrs["current_redshift"] = pf.current_redshift
@@ -136,9 +136,11 @@
     # root datasets -- info about the grids
     ###
     f["grid_dimensions"] = pf.h.grid_dimensions
-    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
     f["grid_level"] = pf.h.grid_levels
-    # @todo: Do we need to loop over the grids for this?
+    # @todo: Fill with proper values
     f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.h.grid_particle_count
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/cdc68c51044c/
changeset:   cdc68c51044c
branch:      yt
user:        xarthisius
date:        2012-09-05 15:07:54
summary:     [stream:load_uniform_grid] simplify and retain the original API with one small change: use a unit conversion factor rather than demanding the domain size in cm
affected #:  1 file

diff -r 40139dff7b92fa81cb7fce008618810395f8c0cf -r cdc68c51044c73a9f681a2d2b4c90fba21536d1a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -297,13 +297,13 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, bbox, bbox_to_cm=1.0,
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
                       nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
     This should allow a uniform grid of data to be loaded directly into yt and
-    nalyzed as would any others.  This comes with several caveats:
+    analyzed as would any others.  This comes with several caveats:
         * Units will be incorrect unless the data has already been converted to
           cgs.
         * Some functions may behave oddly, and parallelism will be
@@ -316,10 +316,11 @@
         This is a dict of numpy arrays, where the keys are the field names.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
-    bbox : array_like (xdim:zdim, LE:RE)
-        Size of computational domain in centimeters
-    bbox_to_cm : float, optional
-        Conversion factor from bbox units to centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm, if the latter
+        is not provided centimeters are assumed
     nprocs: integer, optional
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
@@ -333,11 +334,13 @@
     >>> arr = na.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
     >>> bbox = na.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-    >>> pf = load_uniform_grid(data, arr.shape, bbox, nprocs=12)
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
     """
 
     domain_dimensions = na.array(domain_dimensions)
+    if bbox is None:
+        bbox = na.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
     domain_left_edge = na.array(bbox[:, 0], 'float64')
     domain_right_edge = na.array(bbox[:, 1], 'float64')
     grid_levels = na.zeros(nprocs, dtype='int32').reshape((nprocs,1))
@@ -360,20 +363,9 @@
         del new_data, temp
     else:
         sfh.update({0:data})
-        grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-        grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
-
-        grid_left_edges  = grid_left_edges.astype("float64")
-        grid_left_edges /= domain_dimensions*2**grid_levels
-        grid_left_edges *= domain_right_edge - domain_left_edge
-        grid_left_edges += domain_left_edge
-
-        grid_right_edges  = grid_right_edges.astype("float64")
-        grid_right_edges /= domain_dimensions*2**grid_levels
-        grid_right_edges *= domain_right_edge - domain_left_edge
-        grid_right_edges += domain_left_edge
-        grid_dimensions = grid_right_edges - grid_left_edges
-
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3)
 
     handler = StreamHandler(
         grid_left_edges,
@@ -396,10 +388,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = bbox_to_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = bbox_to_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
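
Both call styles under the retained API, as a hedged sketch (synthetic data; the import path assumes the codebase of the time):

    import numpy as np
    from yt.frontends.stream.data_structures import load_uniform_grid

    data = {"Density": np.random.random((64, 64, 64))}

    # Old-style call: bbox defaults to the unit cube [0, 1]^3 and 3.08e24
    # is now a units-to-cm conversion factor, not a domain size.
    pf = load_uniform_grid(data, data["Density"].shape, 3.08e24)

    # New-style call: explicit bounding box, decomposed into 4 grids.
    bbox = np.array([[0.0, 1.0], [-1.5, 1.5], [1.0, 2.5]])
    pf = load_uniform_grid(data, data["Density"].shape, 3.08e24,
                           bbox=bbox, nprocs=4)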



https://bitbucket.org/yt_analysis/yt-3.0/changeset/387113039a96/
changeset:   387113039a96
branch:      yt
user:        xarthisius
date:        2012-09-05 15:17:14
summary:     update docstring
affected #:  1 file

diff -r cdc68c51044c73a9f681a2d2b4c90fba21536d1a -r 387113039a96cb27f8081240801d2de161be04a0 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -319,8 +319,7 @@
     sim_unit_to_cm : float
         Conversion factor from simulation units to centimeters
     bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm, if the latter
-        is not provided centimeters are assumed
+        Size of computational domain in units sim_unit_to_cm
     nprocs: integer, optional
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional



https://bitbucket.org/yt_analysis/yt-3.0/changeset/64c090a6d57f/
changeset:   64c090a6d57f
branch:      yt
user:        MatthewTurk
date:        2012-09-10 17:24:04
summary:     Merged in xarthisius/yt (pull request #263)
affected #:  3 files

diff -r 5b0682498d834a77ec3779928f7d2d2611554c7f -r 64c090a6d57f30cd7dfa87fe242bb7128f4f791a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -296,8 +297,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +314,66 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    nprocs: integer, optional
+        If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = na.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = na.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = na.array(domain_dimensions)
+    if bbox is None:
+        bbox = na.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = na.array(bbox[:, 0], 'float64')
+    domain_right_edge = na.array(bbox[:, 1], 'float64')
+    grid_levels = na.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(na.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = na.array([grid.shape for grid in temp[key]])
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3)
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        -na.ones(nprocs, dtype='int64'),
+        number_of_particles*na.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        na.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +387,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf


diff -r 5b0682498d834a77ec3779928f7d2d2611554c7f -r 64c090a6d57f30cd7dfa87fe242bb7128f4f791a yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,156 @@
+"""
+Automagical cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    quality = load_balance / (1 + 0.25 * (bsize / ideal_bsize - 1.0))
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consiting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape


diff -r 5b0682498d834a77ec3779928f7d2d2611554c7f -r 64c090a6d57f30cd7dfa87fe242bb7128f4f791a yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -83,11 +83,11 @@
     g.attrs["unique_identifier"] = pf.unique_identifier
     g.attrs["cosmological_simulation"] = pf.cosmological_simulation
     # @todo: Where is this in the yt API?
-    #g.attrs["num_ghost_zones"] = pf...
+    g.attrs["num_ghost_zones"] = 0
     # @todo: Where is this in the yt API?
-    #g.attrs["field_ordering"] = pf...
+    g.attrs["field_ordering"] = 0
     # @todo: not yet supported by yt.
-    #g.attrs["boundary_conditions"] = pf...
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
 
     if pf.cosmological_simulation:
         g.attrs["current_redshift"] = pf.current_redshift
@@ -136,10 +136,12 @@
     # root datasets -- info about the grids
     ###
     f["grid_dimensions"] = pf.h.grid_dimensions
-    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
     f["grid_level"] = pf.h.grid_levels
-    # @todo: Do we need to loop over the grids for this?
-    f["grid_parent_id"] = -1
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.h.grid_particle_count
 
     ###
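
To see the merged quality metric in action, one can score a few candidate layouts by hand; a sketch using evaluate_domain_decomposition as merged above (same array shape as the __main__ demo; environment of the time assumed):

    import numpy as np
    from yt.utilities.decompose import evaluate_domain_decomposition

    n_d = np.array([128, 128, 129])
    for ldom in ([1, 1, 12], [12, 1, 1], [2, 2, 3]):
        quality = evaluate_domain_decomposition(n_d, 12, np.array(ldom))
        print(ldom, quality)
    # The balanced [2, 2, 3] layout minimizes block surface area (less
    # inter-process communication) and scores highest of the three.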



https://bitbucket.org/yt_analysis/yt-3.0/changeset/f683d37da087/
changeset:   f683d37da087
branch:      yt
user:        MatthewTurk
date:        2012-09-10 17:37:59
summary:     Removing the old, unused EnzoTable stuff from Enzo outputs and fixing
detection and setup of 1D and 2D Enzo fields.  This fixes the problem Sam
identified with DivV/z-velocity not working in 2D fields.
affected #:  2 files

diff -r 64c090a6d57f30cd7dfa87fe242bb7128f4f791a -r f683d37da0870bbf34d450c338c5cab049b7b7d3 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -702,23 +702,6 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
@@ -732,9 +715,9 @@
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            na.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            na.concatenate([self.domain_right_edge, [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -870,6 +853,11 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file


diff -r 64c090a6d57f30cd7dfa87fe242bb7128f4f791a -r f683d37da0870bbf34d450c338c5cab049b7b7d3 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -610,7 +610,6 @@
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #
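
The heart of the fix is ordering: the 1D/2D setup now runs only after _parse_parameter_file has populated dimensionality and the domain edges, so _setup_2d pads the parsed values instead of re-reading raw parameters. A schematic sketch, stripped down to the relevant calls (names abbreviated from the diff):

    import numpy as np

    class EnzoOutputSketch(object):
        def _parse_parameter_file(self):
            # ... parse everything first ...
            self.dimensionality = 2
            self.domain_left_edge = np.array([0.0, 0.0])
            self.domain_right_edge = np.array([1.0, 1.0])
            # Dimension-specific setup comes last, once the attributes
            # it pads are guaranteed to exist.
            if self.dimensionality == 1:
                self._setup_1d()
            elif self.dimensionality == 2:
                self._setup_2d()

        def _setup_1d(self):
            pass  # elided

        def _setup_2d(self):
            # Pad the 2D domain out to 3D, as the diff does.
            self.domain_left_edge = np.concatenate([self.domain_left_edge, [0.0]])
            self.domain_right_edge = np.concatenate([self.domain_right_edge, [1.0]])

    pf = EnzoOutputSketch()
    pf._parse_parameter_file()
    print(pf.domain_left_edge, pf.domain_right_edge)  # both padded to length 3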



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b45aa6c3c142/
changeset:   b45aa6c3c142
branch:      yt
user:        samskillman
date:        2012-09-10 17:46:37
summary:     Merged in MatthewTurk/yt (pull request #269)
affected #:  1 file

diff -r f683d37da0870bbf34d450c338c5cab049b7b7d3 -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -136,12 +136,12 @@
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = na.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
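
The net effect of the FLASH fix: per-level cell widths start at 1.0 and only the first ND components are refined, so a 1D or 2D dataset keeps dx = 1 along its unused axes instead of 0 (which would break any later division by dx). A sketch with synthetic ND, refine_by, and root-grid widths:

    import numpy as np

    ND, refine_by, nlevels = 2, 2, 3
    rdx = np.array([1.0 / 64, 1.0 / 64, 1.0])   # root-grid cell widths

    dxs = np.ones((nlevels + 1, 3), dtype='float64')
    for i in range(nlevels + 1):
        dxs[i, :ND] = rdx[:ND] / refine_by ** i

    print(dxs[:, 2])   # [1. 1. 1. 1.] -- never zero on the unused axis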



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e6986d875c43/
changeset:   e6986d875c43
branch:      yt
user:        MatthewTurk
date:        2012-09-10 19:46:03
summary:     Merging na/np switchover from Anthony.
affected #:  112 files
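
The switchover is a mechanical rename of the numpy import alias across 112 files; a hedged sketch of the kind of one-off script that performs it (tree root and patterns are illustrative, written in modern Python, and not the actual tool used):

    import re
    from pathlib import Path

    IMPORT_RE = re.compile(r"^import numpy as na\b", re.M)
    USE_RE = re.compile(r"\bna\.")           # na.array, na.zeros, ...

    for path in Path("yt").rglob("*.py"):    # illustrative tree root
        text = path.read_text()
        new = USE_RE.sub("np.", IMPORT_RE.sub("import numpy as np", text))
        if new != text:
            path.write_text(new)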

diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos if box width fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,


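The mask-cutting loop above zeroes every image pixel whose distance to a halo center falls inside that halo's projected radius, accumulating the cut across halos by multiplying boolean masks. A minimal vectorized sketch of the same idea (hypothetical names, not the yt API; positions and radii in the same fractional image units, and using pixel centers where the real code also credits partial pixel overlap):

    import numpy as np

    def cut_halo_pixels(halo_x, halo_y, halo_radius, pixels, width=1.0):
        """Boolean mask that is False wherever a halo circle covers a pixel center."""
        dx = width / pixels
        centers = (np.arange(pixels) + 0.5) * dx
        px, py = np.meshgrid(centers, centers, indexing='ij')
        mask = np.ones((pixels, pixels), dtype=bool)
        for hx, hy, hr in zip(halo_x, halo_y, halo_radius):
            # keep only pixels whose centers lie outside this halo's circle
            mask &= np.hypot(px - hx, py - hy) >= hr
        return mask
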
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()


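light_cone.py leans on np.random.seed so that a light cone solution can be regenerated exactly from its stored seed: each slice draws a projection axis via np.random.randint(0, 3) and a center via three np.random.random() calls, always in the same order. A stripped-down sketch of that reproducibility pattern (hypothetical names, not the yt API):

    import numpy as np

    def regenerate_solution(seed, n_slices):
        """Rebuild the same axis/center choices from a stored seed."""
        np.random.seed(int(seed))
        solution = []
        for _ in range(n_slices):
            axis = np.random.randint(0, 3)                  # 0, 1, or 2
            center = [np.random.random() for _ in range(3)]
            solution.append({'projection_axis': axis,
                             'projection_center': center})
        return solution

Two runs with the same seed give identical slices, which is why the rerandomization code above is careful to make the same number of calls to the random number generator for every slice.
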
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
 


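The periodic wrap in light_cone_projection.py is the same two-step idiom in x and y: shift negative positions up by one box width, then copy any cell whose half-width (pdx or pdy) hangs past an edge back to the opposite side. A one-axis sketch of the idiom (hypothetical names; the real code carries py, the field, and weight_field through the same masks):

    import numpy as np

    def wrap_axis(px, pdx, width):
        """Wrap cell centers into [0, width) and duplicate edge-straddling cells."""
        px = px.copy()
        px[px < 0] += width                    # shift negatives up by one box width
        right = px + 0.5 * pdx > width         # coverage hanging past the right edge
        left = px - 0.5 * pdx < 0              # coverage hanging past the left edge
        px = np.concatenate([px, px[right] - width, px[left] + width])
        pdx = np.concatenate([pdx, pdx[right], pdx[left]])
        return px, pdx
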
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
 


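unique_solution.py scores a pair of solutions by summing, slice by slice, the common volume of the two projection cubes, each built as an (ndim, 2) array of [min, max] corners. Setting aside the periodic images that common_volume handles, the overlap of two axis-aligned boxes is just the product of the per-axis interval overlaps; a sketch (hypothetical name):

    import numpy as np

    def box_overlap_volume(cube1, cube2):
        """Overlap volume of two axis-aligned boxes shaped (ndim, 2) as [min, max]."""
        lo = np.maximum(cube1[:, 0], cube2[:, 0])
        hi = np.minimum(cube1[:, 1], cube2[:, 1])
        return np.prod(np.clip(hi - lo, 0.0, None))
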
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate length of shortest vector between to points in periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments


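periodic_distance above finds the minimum-image separation in a unit box by taking, per axis, the smaller of the forward and backward wrapped differences. For a unit domain the same result follows from a modulo plus a minimum; a compact sketch (not the yt function itself):

    import numpy as np

    def minimum_image_distance(a, b):
        """Shortest separation between two points in a unit periodic box."""
        d = np.abs(a - b) % 1.0
        d = np.minimum(d, 1.0 - d)   # take the shorter way around on each axis
        return np.sqrt((d * d).sum(axis=-1))
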
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid,
         # neglecting to check whether the 4 particles are in the same plane;
         # that is almost certain never to occur, so we
         # will deal with it later if it ever comes up
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # It apparently happens sometimes that the NaNs are turned
         # into infs, which messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,

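A note on the np.bincount((self.tags + 1).tolist()) call in the hunk above:
unassigned particles carry the tag -1, so shifting every tag by one drops
them into bin 0 and puts group i into bin i + 1, letting a single bincount
pass produce all group sizes. A minimal sketch of the idiom (the tags
values are made up for illustration):

    import numpy as np

    # Hypothetical HOP tags; -1 marks particles not assigned to any group.
    tags = np.array([-1, 0, 2, 0, -1, 1, 2, 2])

    # Shift by one so the -1 sentinel lands in bin 0 and group i in bin i + 1.
    counts = np.bincount(tags + 1)

    print(counts[0])   # 2 -> number of unassigned particles
    print(counts[1:])  # [2 1 3] -> sizes of groups 0, 1, 2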

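The bulk velocity, CoM, and RMS velocity hunks above all lean on the same
segmented-reduction idiom: argsort the per-particle group IDs so each group
becomes one contiguous slice, locate the points where the sorted IDs change
with np.ediff1d, and turn those change points into slice boundaries (the
"marks"). A standalone sketch of that idiom with made-up inputs, using
np.flatnonzero in place of the diff's boolean-mask indexing (the boundaries
come out the same):

    import numpy as np

    # Hypothetical per-particle group IDs and values (e.g. mass-weighted vx).
    group_ids = np.array([2, 0, 1, 0, 2, 2])
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

    # Sort so each group occupies one contiguous slice.
    order = group_ids.argsort()
    sorted_ids = group_ids[order]
    sorted_vals = values[order]

    # Slice boundaries sit wherever the sorted IDs change.
    uniq = np.unique(sorted_ids)
    marks = np.flatnonzero(np.ediff1d(sorted_ids)) + 1
    marks = np.concatenate(([0], marks, [sorted_ids.size]))

    # One np.sum per group, mirroring how bulk_vel and CoM_M accumulate.
    sums = np.empty(uniq.size, dtype='float64')
    for i, u in enumerate(uniq):
        sums[i] = np.sum(sorted_vals[marks[i]:marks[i + 1]])
    print(sums)  # [  6.   3.  12.] for groups 0, 1, 2
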
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,7 +25,7 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Setup the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region and within one padding of the boundary,
         # and this will do it.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unf. this has to be a dict, because any task can have particles
         # of any index, making an array map as long as the full particle count.
         # as the full number of particles.
         # We can skip this the first two times around.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -502,9 +502,8 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the holes that are modulo mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     

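The corner test in the neighbor-finding hunk above computes minimum-image
distances by hand: along each axis, take the smaller of the direct offset
and the offset wrapped through the periodic boundary, then form the usual
Euclidean norm. A self-contained sketch of the same rule (the function name
and inputs are illustrative, not from the yt source):

    import numpy as np

    def periodic_distance(a, b, period):
        # Per-axis minimum-image separation, then the Euclidean norm.
        a = np.asarray(a, dtype='float64')
        b = np.asarray(b, dtype='float64')
        period = np.asarray(period, dtype='float64')
        d = np.fabs(a - b)
        d = np.minimum(d, period - d)
        return np.sqrt((d * d).sum())

    # In a unit box, 0.05 and 0.95 are only 0.1 apart through the wrap.
    print(periodic_distance([0.05] * 3, [0.95] * 3, [1.0] * 3))  # ~0.1732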

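A second idiom worth calling out, from the chain-sorting hunks: chains are
relabeled so that chainID 0 is always the densest. argsort plus np.flipud
gives the descending order, and scattering np.arange through that order
builds an old-ID to new-ID lookup in one step. The diff stores this lookup
in a variable named map, which shadows the Python builtin; it is renamed
remap in this sketch, and the densities are made up:

    import numpy as np

    # Hypothetical peak densities, one entry per chain, indexed by chainID.
    densest_in_chain = np.array([3.0, 9.0, 1.0, 5.0])

    sort = densest_in_chain.argsort()
    sort = np.flipud(sort)                      # chainIDs, densest first
    remap = np.empty(sort.size, dtype='int64')
    remap[sort] = np.arange(sort.size)          # old chainID -> new chainID
    densest_in_chain = densest_in_chain[sort]   # now [9. 5. 3. 1.]

    # Relabel any array of old chainIDs with a single indexing operation.
    old_ids = np.array([0, 2, 1, 3])
    print(remap[old_ids])  # [2 3 0 1]
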
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) mass (Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # without an explicit x argument will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1


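The integrate_inf hunks above hand-roll the trapezoid rule on log-spaced
abscissae and keep widening the interval until the area converges. Each
pass is just the rule sketched below; equivalently, np.trapz accepts an
explicit x array, so it copes with the nonuniform spacing too. The
integrand here is illustrative:

    import numpy as np

    def trapezoid_nonuniform(fcn, xvals):
        # Trapezoid rule on arbitrarily spaced sample points.
        yvals = fcn(xvals)
        xdiffs = xvals[1:] - xvals[:-1]
        return np.sum((yvals[1:] + yvals[:-1]) * xdiffs / 2.0)

    fcn = lambda x: np.exp(-x)                           # illustrative
    xvals = np.logspace(0, np.log10(100.0), 201) - 0.9   # log-spaced grid
    print(trapezoid_nonuniform(fcn, xvals))  # ~0.905
    print(np.trapz(fcn(xvals), xvals))       # same sum via np.trapz
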
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendent fractions and ancestry fractions.
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me


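find_relative_parentage above reduces parentage to two fractions of one set
intersection: the share of the child's particles that came from this halo,
and the share of this halo's particles that ended up in the child. A sketch
with made-up ID lists (the float() casts keep the Python 2 division
semantics this module assumes):

    import numpy as np

    # Hypothetical particle IDs for a parent halo and a candidate child.
    parent_ids = np.array([1, 2, 3, 4, 5, 6])
    child_ids = np.array([4, 5, 6, 7])

    overlap = np.intersect1d(parent_ids, child_ids).size
    of_child_from_me = float(overlap) / child_ids.size   # 0.75
    of_mine_from_me = float(overlap) / parent_ids.size   # 0.5
    print(of_child_from_me, of_mine_from_me)
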
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -171,7 +171,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -346,7 +346,7 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            child_points = na.array(child_points)
+            child_points = np.array(child_points)
             kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
@@ -362,7 +362,7 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                query = na.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
                 NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
@@ -387,7 +387,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This is allowing us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -449,19 +449,19 @@
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
                         parent_IDs.append(thisIDs)
                         parent_masses.append(thisMasses)
-                        parent_halos.append(na.ones(len(thisIDs),
+                        parent_halos.append(np.ones(len(thisIDs),
                             dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
             # Sort the arrays by particle index in ascending order.
             if len(parent_IDs)==0:
-                parent_IDs = na.array([], dtype='int64')
-                parent_masses = na.array([], dtype='float64')
-                parent_halos = na.array([], dtype='int32')
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
             else:
-                parent_IDs = na.concatenate(parent_IDs).astype('int64')
-                parent_masses = na.concatenate(parent_masses).astype('float64')
-                parent_halos = na.concatenate(parent_halos).astype('int32')
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
                 sort = parent_IDs.argsort()
                 parent_IDs = parent_IDs[sort]
                 parent_masses = parent_masses[sort]
@@ -471,7 +471,7 @@
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
 
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
@@ -488,26 +488,26 @@
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
                     child_IDs.append(thisIDs)
                     child_masses.append(thisMasses)
-                    child_halos.append(na.ones(len(thisIDs),
+                    child_halos.append(np.ones(len(thisIDs),
                         dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
         # Sort the arrays by particle index in ascending order.
         if len(child_IDs)==0:
-            child_IDs = na.array([], dtype='int64')
-            child_masses = na.array([], dtype='float64')
-            child_halos = na.array([], dtype='int32')
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
         else:
-            child_IDs = na.concatenate(child_IDs).astype('int64')
-            child_masses = na.concatenate(child_masses)
-            child_halos = na.concatenate(child_halos)
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
             sort = child_IDs.argsort()
             child_IDs = child_IDs[sort]
             child_masses = child_masses[sort]
             child_halos = child_halos[sort]
             del sort
         
-        child_send = na.ones(child_IDs.size, dtype='bool')
+        child_send = np.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
@@ -620,8 +620,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]


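The `_match` hunk above is the core of the merger-tree pass: `np.in1d` reduces two sorted particle-ID lists to their intersection so halo tags can be paired off positionally. A minimal sketch of that step, using made-up IDs rather than yt data:

    import numpy as np

    # Hypothetical sorted particle IDs and halo tags for one parent/child
    # snapshot pair; the real inputs come from the HDF5 reads above.
    parent_IDs   = np.array([1, 3, 5, 7, 9], dtype='int64')
    child_IDs    = np.array([2, 3, 5, 8, 9], dtype='int64')
    parent_halos = np.array([0, 0, 1, 1, 2], dtype='int32')
    child_halos  = np.array([0, 1, 1, 2, 2], dtype='int32')

    # Boolean masks of the IDs present in both snapshots; assume_unique
    # holds because a particle ID appears at most once per snapshot.
    parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique=True)
    child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique=True)

    # Both inputs are sorted by ID, so the pared-down arrays line up
    # element for element, giving parent-halo -> child-halo pairs.
    parent_halos_cut = parent_halos[parent_in_child]
    child_halos_cut = child_halos[child_in_parent]
    print(list(zip(parent_halos_cut, child_halos_cut)))  # [(0, 1), (1, 1), (2, 2)]
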
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


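The `centering_methods` hunk, like most in this batch, changes nothing but the numpy alias: `na` becomes `np`. Purely as an illustration of how such a mechanical rename can be scripted (there is no indication the commits were actually produced this way), a word-boundary regex keeps identifiers such as `nar` untouched:

    import re
    import sys

    # Match only the bare token "na." so names like "nar" survive; the
    # import line itself is rewritten separately.
    pattern = re.compile(r'\bna\.')

    def rename_alias(path):
        with open(path) as f:
            src = f.read()
        src = src.replace("import numpy as na", "import numpy as np")
        with open(path, 'w') as f:
            f.write(pattern.sub('np.', src))

    for path in sys.argv[1:]:
        rename_alias(path)

Note that the regex also rewrites occurrences inside comments and docstrings, which matches what these diffs show (e.g. the `na.log10` mention in a docstring in a later hunk).
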
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):


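The virial-filter hunk walks the overdensity profile from the outermost bin inward, looking for the pair of bins that brackets the virial threshold. A toy version of that scan with an invented profile:

    import numpy as np

    # Stand-ins for the overDensity profile (outermost bin last) and the
    # virial threshold passed to the filter.
    overDensity = np.array([900., 500., 250., 180., 90.])
    virial_overdensity = 200.

    # Walk inward and record the last bin still above the threshold,
    # mirroring the loop in the hunk above.
    index = None
    for q in np.arange(len(overDensity), 0, -1) - 1:
        if overDensity[q] < virial_overdensity and \
                overDensity[q-1] >= virial_overdensity:
            index = q - 1
            break
    print(index)  # -> 2: overDensity[2] = 250 is the last bin >= 200
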
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -684,7 +684,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
                                                                                  lazy_reader=True)
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -845,7 +845,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1076,7 +1076,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1171,7 +1171,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1179,7 +1179,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1207,7 +1207,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1215,7 +1215,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1300,17 +1300,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
 


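One detail in the `multi_halo_profiler` hunks worth noting: `MaxLocation` returns a flat index into the grid, and `np.unravel_index` converts it back to an (i, j, k) tuple that can index the velocity fields directly. For example (shape and index invented):

    import numpy as np

    # A flat index as a MaxLocation-style quantity might return it, plus
    # the ActiveDimensions of the grid it refers to.
    ActiveDimensions = (16, 16, 16)
    maxi = 1234

    # Recover the cell coordinates in C (row-major) order.
    max_cell = np.unravel_index(maxi, ActiveDimensions)
    print(max_cell)  # -> (4, 13, 2), since 4*256 + 13*16 + 2 == 1234
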
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --


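The `hierarchy_subset` hunks repeatedly take `np.min(..., axis=0)` over per-grid quantities; axis 0 runs over the grids, so the result is the component-wise corner of the level's bounding index box. A stand-alone illustration with invented start indices:

    import numpy as np

    # What get_global_startindex() might return for three grids on a level.
    start_indices = np.array([[4, 8, 4],
                              [2, 8, 6],
                              [4, 2, 8]])

    # Minimum of each column: the lower-left corner of the bounding box.
    corner = np.min(start_indices, axis=0).astype('int64')
    print(corner)  # -> [2 2 4]
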
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modification.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]


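The clump-sorting helper in the `clump_tools` hunk orders clumps so that denser contours come first: `np.argsort` on each clump's minimum density gives ascending order, and the reversed index walk flips it. An equivalent sketch with placeholder clump labels:

    import numpy as np

    # Placeholder clumps and their minimum densities.
    clumps = np.array(['a', 'b', 'c', 'd'], dtype=object)
    minDensity = [3.0, 1.0, 4.0, 2.0]

    args = np.argsort(minDensity)  # ascending by minimum density
    print(clumps[args][::-1])      # -> ['c' 'a' 'd' 'b'], densest floor first
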
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",


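The first pass of `identify_contours` seeds every in-window cell with its own provisional contour id, counting down from the total cell count so ids never collide across grids. A self-contained toy version of the seeding step:

    import numpy as np

    # A toy field on one grid and a value window, standing in for
    # grid[field] and the (min_val, max_val) arguments.
    field = np.random.random((8, 8, 8))
    min_val, max_val = 0.4, 0.9
    cm = np.ones(field.shape, dtype='bool')  # trivial cut mask

    cur_max_id = field.size
    local_ind = np.where((field > min_val) & (field < max_val) & cm)
    kk = np.arange(cur_max_id, cur_max_id - local_ind[0].size, -1)
    tempContours = np.ones(field.shape, dtype='int64') * -1
    tempContours[local_ind] = kk
    print(np.unique(tempContours[tempContours > -1]).size)  # seeded cells
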
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation!
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]


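The interpolation in `_interpolate_value` keys off `np.digitize`: for each radius it finds the pair of surfaces that bracket the point, then fits linearly between them. A cut-down sketch with invented surface values (the real ones come from the surfaces built above):

    import numpy as np

    # Radial bin edges and per-point radii, standing in for self.bins and
    # the r computed from the positions (values illustrative).
    bins = np.linspace(0.01, 0.5, 6)
    r = np.array([0.05, 0.2, 0.45])

    # digitize gives, for each radius, the first bin edge above it, so
    # bins[digi-1] and bins[digi] bracket the point.
    digi = np.digitize(r, bins)
    in_r, out_r = bins[digi - 1], bins[digi]

    # Invented surface values at the bracketing radii; interpolate
    # linearly in r, as the hunk above does away from the special cases.
    in_val = np.array([1.0, 2.0, 3.0])
    out_val = np.array([2.0, 4.0, 5.0])
    val = (out_val - in_val) / (out_r - in_r) * (r - in_r) + in_val
    print(val)
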
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]


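`_get_interpolator` reduces the emissivity table over one energy band with `np.trapz`. A simplified version of that reduction (the real code also rescales the energy spacing into frequency units; the table and bounds here are toys):

    import numpy as np

    # Log-spaced photon energies and a random emissivity table, standing
    # in for self.ev_vals and self.table.
    ev_vals = np.logspace(-1, 2, 64)        # 0.1 to 100 eV
    table = np.random.random((16, 16, 64))  # (density, temperature, energy)

    # Locate the sub-band and integrate the last axis over it.
    ev_min, ev_max = 0.5, 2.0
    e_is, e_ie = np.digitize([ev_min, ev_max], ev_vals)
    bin_table = np.trapz(table[..., e_is-1:e_ie], ev_vals[e_is-1:e_ie],
                         axis=-1)
    print(bin_table.shape)  # -> (16, 16), one integral per (rho, T) pair
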
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if len(sub) == 0: return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.


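The star-formation-rate hunks bin star creation times with `np.digitize` and accumulate mass per bin. A minimal version with made-up stars; note the extra trailing slot, which catches a star sitting exactly on the last bin edge:

    import numpy as np

    # Invented creation times (code units) and masses (Msun).
    ct_stars = np.array([0.1, 0.35, 0.4, 0.8])
    mass_stars = np.array([1.0, 2.0, 3.0, 4.0])
    time_bins = np.linspace(0.0, 1.0, 5)  # 4 bins plus the right edge

    # Which bin each star lands in, then the mass total per bin.
    inds = np.digitize(ct_stars, time_bins) - 1
    mass_bins = np.zeros(len(time_bins), dtype='float64')
    for index in np.unique(inds):
        mass_bins[index] += sum(mass_stars[inds == index])
    print(mass_bins)  # -> [ 1.  5.  0.  4.  0.]
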
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -78,14 +78,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -153,7 +153,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -178,7 +178,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -211,7 +211,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -220,13 +220,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -246,11 +246,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -332,7 +332,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -340,7 +340,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + vertex*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
                     subgrid,hilbert_child,output,refined,levels,grids,level+1,
@@ -381,17 +381,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -402,7 +402,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -411,7 +411,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -437,12 +437,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and
@@ -450,41 +450,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until we are nwide cells span
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -497,14 +497,14 @@
         dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -525,8 +525,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -540,7 +540,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -570,7 +570,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -698,14 +698,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -722,14 +722,14 @@
     if scene_distance is  None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -745,11 +745,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -763,11 +763,11 @@
     use this to multiply two matrices, it will think that you're
     trying to multiply by a set of vectors and all hell will break
     loose."""    
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -775,14 +775,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -791,25 +791,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -826,7 +826,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the x (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -843,7 +843,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -851,19 +851,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -912,9 +912,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(angle),0.,np.sin(angle)]
+    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
     cameraset_ring['%02i'%angle]=(pos,vc)
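
One subtlety in the loop above: np.cos and np.sin take radians, while angle runs over degrees. A minimal sketch of the same ring with an explicit conversion, assuming degrees were the intent:

    import collections
    import numpy as np

    cameraset_ring = collections.OrderedDict()
    segments = 20
    for angle in np.linspace(0, 360, segments):
        pos = [np.cos(np.radians(angle)), 0., np.sin(np.radians(angle))]
        vc  = [np.cos(np.radians(90 - angle)), 0., np.sin(np.radians(90 - angle))]
        cameraset_ring['%02i' % angle] = (pos, vc)
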
             
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
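
For concreteness, the two spacing modes produce ruler sets like these (standalone sketch with a made-up range and count):

    import math
    import numpy as np

    length_range, length_number = [0.01, 0.5], 5
    lin = np.linspace(length_range[0], length_range[1], length_number)
    log = np.logspace(math.log10(length_range[0]),
                      math.log10(length_range[1]), length_number)
    print(lin)   # evenly spaced:          0.01, 0.1325, 0.255, 0.3775, 0.5
    print(log)   # evenly spaced in log10: 0.01, ~0.027, ~0.071, ~0.188, 0.5
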
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Reflect so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
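
The arccos(2v - 1) draw is the standard trick for directions uniform over the sphere: drawing theta uniformly would crowd pairs toward the poles, whereas drawing cos(theta) uniformly in [-1, 1] gives uniform solid angle. A standalone sketch of the offset step in a unit periodic box:

    import math
    import numpy as np

    mt = np.random.mtrand.RandomState(seed=42)
    size, length = 4, 0.1
    r1 = mt.uniform(low=0.0, high=1.0, size=(size, 3))
    phi = mt.uniform(low=0., high=2. * math.pi, size=size)
    theta = np.arccos(2 * mt.uniform(low=0., high=1., size=size) - 1)
    r2 = np.empty((size, 3))
    r2[:, 0] = r1[:, 0] + length * np.cos(phi) * np.sin(theta)
    r2[:, 1] = r1[:, 1] + length * np.sin(phi) * np.sin(theta)
    r2[:, 2] = r1[:, 2] + length * np.cos(theta)
    r2 %= 1.0                                    # reflect into the periodic box
    print(np.sqrt(((r2 - r1)**2).sum(axis=1)))   # each ~0.1, up to wrapping
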
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is what is effectively happening below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_edges is either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring in the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
         multi = 1
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
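
The flattening above maps an N-dimensional bin coordinate to one flat index, using the sizes of the lower dimensions as strides, then histograms the flat indices in a single pass. A simplified 2-D sketch of the same idea (toy edges, skipping the under/overflow bookkeeping):

    import numpy as np

    edges_x = np.linspace(0., 1., 5)                      # dimension 0
    edges_y = np.linspace(0., 1., 3)                      # dimension 1
    results = np.array([[0.1, 0.2], [0.6, 0.9], [0.3, 0.55]])
    hit_bin = np.digitize(results[:, 0], edges_x) - 1     # stride 1
    hit_bin += (np.digitize(results[:, 1], edges_y) - 1) * edges_x.size
    digi_bins = np.arange(edges_x.size * edges_y.size + 1)
    hist, _ = np.histogram(hit_bin, digi_bins)
    print(hist.reshape(edges_y.size, edges_x.size))       # unflattened counts
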
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, sum over every dimension except
         dim, leaving that axis untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
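
In NumPy 1.7 and later the same collapse can be written as one sum over a tuple of axes; a standalone check that the two routes agree:

    import numpy as np

    def dim_sum(a, dim):
        dims = np.flipud(np.arange(len(a.shape)))
        # Sum the higher axes first so the remaining axis numbers stay valid.
        for this_dim in np.concatenate((dims[dims > dim], dims[dims < dim])):
            a = a.sum(axis=this_dim)
        return a

    a = np.arange(24).reshape(2, 3, 4)
    print(dim_sum(a, 1))         # [ 60  92 124]
    print(a.sum(axis=(0, 2)))    # same values in one call
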
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -29,7 +29,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -74,9 +74,9 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_grid_state(func):
     """
@@ -181,13 +181,13 @@
         if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tempx = na.abs(self['x'] - center[0])
-                tempx = na.minimum(tempx, self.DW[0] - tempx)
-                tempy = na.abs(self['y'] - center[1])
-                tempy = na.minimum(tempy, self.DW[1] - tempy)
-                tempz = na.abs(self['z'] - center[2])
-                tempz = na.minimum(tempz, self.DW[2] - tempz)
-                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
+                tempx = np.abs(self['x'] - center[0])
+                tempx = np.minimum(tempx, self.DW[0] - tempx)
+                tempy = np.abs(self['y'] - center[1])
+                tempy = np.minimum(tempy, self.DW[1] - tempy)
+                tempz = np.abs(self['z'] - center[2])
+                tempz = np.minimum(tempz, self.DW[2] - tempz)
+                tr = np.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.field_data[field]
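
The RadiusCode branch is the minimum-image convention applied per axis: in a periodic box of width DW, the separation along an axis is min(|dx|, DW - |dx|). A standalone sketch:

    import numpy as np

    DW = np.array([1., 1., 1.])          # domain width per axis
    center = np.array([0.9, 0.5, 0.5])
    pos = np.array([0.05, 0.5, 0.5])     # just across the periodic boundary
    temp = np.abs(pos - center)
    temp = np.minimum(temp, DW - temp)   # wrap to the nearest image
    print(np.sqrt((temp**2).sum()))      # 0.15, not the naive 0.85
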
@@ -235,14 +235,14 @@
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -250,7 +250,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -376,7 +376,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -421,11 +421,11 @@
         return grids
 
     def select_grid_indices(self, level):
-        return na.where(self.grid_levels == level)
+        return np.where(self.grid_levels == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
-            self.__grid_left_edge = na.array([g.LeftEdge for g in self._grids])
+            self.__grid_left_edge = np.array([g.LeftEdge for g in self._grids])
         return self.__grid_left_edge
 
     def __del_grid_left_edge(self):
@@ -441,7 +441,7 @@
 
     def __get_grid_right_edge(self):
         if self.__grid_right_edge == None:
-            self.__grid_right_edge = na.array([g.RightEdge for g in self._grids])
+            self.__grid_right_edge = np.array([g.RightEdge for g in self._grids])
         return self.__grid_right_edge
 
     def __del_grid_right_edge(self):
@@ -457,7 +457,7 @@
 
     def __get_grid_levels(self):
         if self.__grid_levels == None:
-            self.__grid_levels = na.array([g.Level for g in self._grids])
+            self.__grid_levels = np.array([g.Level for g in self._grids])
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +474,7 @@
 
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
-            self.__grid_dimensions = na.array([g.ActiveDimensions for g in self._grids])
+            self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
         return self.__grid_dimensions
 
     def __del_grid_dimensions(self):
@@ -516,13 +516,13 @@
             if field not in self.hierarchy.field_list and not in_grids:
                 if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
             if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
-                self._sortkey = na.argsort(self[self.sort_by])
+                self._sortkey = np.argsort(self[self.sort_by])
             # We *always* sort the field here if we have not successfully
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
@@ -581,7 +581,7 @@
 
     def _get_list_of_grids(self):
         # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+        y = np.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
                     & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
                     & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
                     & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
@@ -604,10 +604,10 @@
         else:
             sl = self._cut_masks[grid.id]
         if not iterable(grid[field]):
-            gf = grid[field] * na.ones(grid.child_mask[sl].shape)
+            gf = grid[field] * np.ones(grid.child_mask[sl].shape)
         else:
             gf = grid[field][sl]
-        return gf[na.where(grid.child_mask[sl])]
+        return gf[np.where(grid.child_mask[sl])]
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
@@ -646,10 +646,10 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
@@ -659,7 +659,7 @@
         # Get the value of the line at each LeftEdge and RightEdge
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        p = np.zeros(self.pf.h.num_grids, dtype='bool')
         # Check left faces first
         for i in range(3):
             i1 = (i+1) % 3
@@ -670,10 +670,10 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
+        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+                & np.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+                & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
     def _get_line_at_coord(self, v, index):
@@ -684,24 +684,24 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                        grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        self._dts[grid.id] = np.abs(dts)
+        self._ts[grid.id] = np.abs(ts)
         return mask
 
 class AMRStreamlineBase(AMR1DData):
@@ -745,11 +745,11 @@
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2]
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -760,14 +760,14 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
@@ -775,13 +775,13 @@
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1) 
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -842,8 +842,8 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = na.array([])
-            else: data = na.concatenate(data)
+            if len(data) == 0: data = np.array([])
+            else: data = np.concatenate(data)
             temp_data[field] = data
             # Now the next field can use this field
             self[field] = temp_data[field] 
@@ -891,7 +891,7 @@
 
         >>> proj = pf.h.proj(0, "Density")
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         if center is None:
             center = self.get_field_parameter("center")
@@ -944,11 +944,11 @@
         """
         import yt.utilities.delaunay as de
         if log_spacing:
-            zz = na.log10(self[field])
+            zz = np.log10(self[field])
         else:
             zz = self[field]
-        xi, yi = na.array( \
-                 na.mgrid[LE[0]:RE[0]:side*1j, \
+        xi, yi = np.array( \
+                 np.mgrid[LE[0]:RE[0]:side*1j, \
                           LE[1]:RE[1]:side*1j], 'float64')
         zi = de.Triangulation(self['px'],self['py']).nn_interpolator(zz)\
                  [LE[0]:RE[0]:side*1j, \
@@ -1082,7 +1082,7 @@
             points = None
             t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
-            points = na.concatenate(points)
+            points = np.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
@@ -1124,27 +1124,27 @@
         nx = grid.child_mask.shape[xaxis]
         ny = grid.child_mask.shape[yaxis]
         mask = self.__cut_mask_child_mask(grid)[sl]
-        cm = na.where(mask.ravel()== 1)
-        cmI = na.indices((nx,ny))
+        cm = np.where(mask.ravel()== 1)
+        cmI = np.indices((nx,ny))
         ind = cmI[0, :].ravel()   # xind
         npoints = cm[0].shape
         # create array of "npoints" ones that will be reused later
-        points = na.ones(npoints, 'float64')
+        points = np.ones(npoints, 'float64')
         # calculate xpoints array
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
         del cmI   # no longer needed 
-        t = na.vstack( (t, points * ind[cm] * dy + \
+        t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
         del ind, cm   # no longer needed
         # calculate zpoints array
-        t = na.vstack((t, points * self.coord))
+        t = np.vstack((t, points * self.coord))
         # calculate dx array
-        t = na.vstack((t, points * dx * 0.5))
+        t = np.vstack((t, points * dx * 0.5))
         # calculate dy array
-        t = na.vstack((t, points * dy * 0.5))
+        t = np.vstack((t, points * dy * 0.5))
         # return [xpoints, ypoints, zpoints, dx, dy] as (5, npoints) array
         return t.swapaxes(0, 1)
 
@@ -1169,7 +1169,7 @@
             dv = self.hierarchy.io._read_data_slice(grid, field, self.axis, sl_ind) * conv_factor
         else:
             dv = grid[field]
-            if dv.size == 1: dv = na.ones(grid.ActiveDimensions)*dv
+            if dv.size == 1: dv = np.ones(grid.ActiveDimensions)*dv
             dv = dv[sl]
         mask = self.__cut_mask_child_mask(grid)[sl]
         dataVals = dv.ravel()[mask.ravel() == 1]
@@ -1251,11 +1251,11 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
@@ -1276,7 +1276,7 @@
         # @todo: Convert to using corners
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+        vertices = np.array([[LE[:,0],LE[:,1],LE[:,2]],
                              [RE[:,0],RE[:,1],RE[:,2]],
                              [LE[:,0],LE[:,1],RE[:,2]],
                              [RE[:,0],RE[:,1],LE[:,2]],
@@ -1285,27 +1285,27 @@
                              [LE[:,0],RE[:,1],LE[:,2]],
                              [RE[:,0],LE[:,1],RE[:,2]]])
         # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
         self.D = D
         self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+            np.where(np.logical_not(np.all(D<0,axis=0) | np.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
-        D = na.ones(ss) * self._d
+        D = np.ones(ss) * self._d
         x = grid.LeftEdge[0] + grid.dds[0] * \
-                (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
         y = grid.LeftEdge[1] + grid.dds[1] * \
-                (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
         z = grid.LeftEdge[2] + grid.dds[2] * \
-                (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
-        cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
+        diag_dist = np.sqrt(np.sum(grid.dds**2.0))
+        cm = (np.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
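
The test above keeps any cell whose center lies within half the cell diagonal of the plane: D is the signed distance n . x + d at each cell center, and every cell the plane actually crosses satisfies |D| <= diag/2, so the mask errs on the inclusive side. A one-cell sketch:

    import numpy as np

    norm_vec = np.array([0., 0., 1.])       # plane z = 0.5
    d = -0.5
    cell_center = np.array([0.3, 0.7, 0.52])
    dds = np.array([0.1, 0.1, 0.1])         # cell widths
    D = np.dot(norm_vec, cell_center) + d   # signed distance to the plane
    diag_dist = np.sqrt(np.sum(dds**2.0))
    print(np.abs(D) <= 0.5 * diag_dist)     # True: this cell straddles the plane
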
 
     def _generate_coords(self):
@@ -1313,12 +1313,12 @@
         for grid in self._get_grids():
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
-        else: points = na.concatenate(points)
+        else: points = np.concatenate(points)
         t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
-        self['px'] = na.dot(pos, self._x_vec)
-        self['py'] = na.dot(pos, self._y_vec)
-        self['pz'] = na.dot(pos, self._norm_vec)
+        self['px'] = np.dot(pos, self._x_vec)
+        self['py'] = np.dot(pos, self._y_vec)
+        self['pz'] = np.dot(pos, self._norm_vec)
         self['pdx'] = t[:,3] * 0.5
         self['pdy'] = t[:,3] * 0.5
         self['pdz'] = t[:,3] * 0.5
@@ -1326,14 +1326,14 @@
     def _generate_grid_coords(self, grid):
         pointI = self._get_point_indices(grid)
         coords = [grid[ax][pointI].ravel() for ax in 'xyz']
-        coords.append(na.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
-        return na.array(coords).swapaxes(0,1)
+        coords.append(np.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
+        return np.array(coords).swapaxes(0,1)
 
     def _get_data_from_grid(self, grid, field):
         if not self.pf.field_info[field].particle_type:
             pointI = self._get_point_indices(grid)
             if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions)
+                t = grid[field] * np.ones(grid.ActiveDimensions)
                 return t[pointI].ravel()
             return grid[field][pointI].ravel()
         else:
@@ -1344,10 +1344,10 @@
 
     @cache_point_indices
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _gen_node_name(self):
         cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
@@ -1391,7 +1391,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -1435,34 +1435,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
         
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is False:
             self._refresh_data()
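
The basis construction crosses the normal with each coordinate axis, picks the most favorable candidate, and then orthonormalizes by repeated cross products, yielding an in-plane (x, y) frame. A standalone check of orthonormality:

    import numpy as np

    normal = np.array([1., 1., 0.])
    norm_vec = normal / np.sqrt(np.dot(normal, normal))
    vecs = np.identity(3)
    ax = np.cross(norm_vec, vecs).sum(axis=1).argmax()
    x_vec = np.cross(vecs[ax, :], norm_vec).ravel()
    x_vec /= np.sqrt(np.dot(x_vec, x_vec))
    y_vec = np.cross(norm_vec, x_vec).ravel()
    y_vec /= np.sqrt(np.dot(y_vec, y_vec))
    # All pairwise dot products vanish: an orthonormal frame for the plane.
    print(np.dot(x_vec, norm_vec), np.dot(y_vec, norm_vec), np.dot(x_vec, y_vec))
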
@@ -1479,11 +1479,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -1491,12 +1491,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -1512,7 +1512,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -1538,27 +1538,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         self._refresh_data()
         return
@@ -1584,7 +1584,7 @@
                     continue # A "True" return means we did it
             if not self._vc_data.has_key(field):
                 self._vc_data[field] = {}
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
@@ -1686,9 +1686,9 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
@@ -1743,7 +1743,7 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_dls(self, grid, fields):
@@ -1755,8 +1755,8 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
+        dls = np.array(dls)
+        convs = np.array(convs)
         if self.proj_style == "mip":
             dls[:] = 1.0
             convs[:] = 1.0
@@ -1822,14 +1822,14 @@
                 ds = gs[0].dds[0]
             else:
                 ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
+            dxs.append(np.ones(nvals.shape[0], dtype='float64') * ds)
+        coord_data = np.concatenate(coord_data, axis=0).transpose()
+        field_data = np.concatenate(field_data, axis=0).transpose()
         if self._weight is None:
             dls, convs = self._get_dls(self._grids[0], fields)
             field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        weight_data = np.concatenate(weight_data, axis=0).transpose()
+        dxs = np.concatenate(dxs, axis=0).transpose()
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = dxs
@@ -1843,7 +1843,7 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -1853,7 +1853,7 @@
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
         if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -1873,16 +1873,16 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            used_points = np.logical_or.reduce(used_data, self.axis)
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
+                      for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
+        to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 
@@ -1894,8 +1894,8 @@
         if len(grids_to_initialize) == 0: return
         pbar = get_pbar('Initializing tree % 2i / % 2i' \
                           % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
+        start_index = np.empty(2, dtype="int64")
+        dims = np.empty(2, dtype="int64")
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
         for pi, grid in enumerate(grids_to_initialize):
@@ -1920,7 +1920,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2024,7 +2024,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -2083,7 +2083,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -2112,12 +2112,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -2142,7 +2142,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -2169,8 +2169,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids causes this
                     # to round to up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -2213,8 +2213,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
@@ -2225,10 +2225,10 @@
                 del self.__overlap_masks[grid.id]
             mylog.debug("End of projecting level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -2244,7 +2244,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -2260,7 +2260,7 @@
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -2278,18 +2278,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -2300,7 +2300,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2367,30 +2367,30 @@
         >>> print fproj["Density"]
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
 
     def _generate_coords(self):
@@ -2398,9 +2398,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
@@ -2422,7 +2422,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)
@@ -2483,10 +2483,10 @@
             if ( (i%100) == 0):
                 mylog.info("Working on % 7i / % 7i", i, len(self._grids))
             grid.set_field_parameter("center", self.center)
-            points.append((na.ones(
+            points.append((np.ones(
                 grid.ActiveDimensions,dtype='float64')*grid['dx'])\
                     [self._get_point_indices(grid)])
-            t = na.concatenate([t,points])
+            t = np.concatenate([t,points])
             del points
         self['dx'] = t
         #self['dy'] = t
@@ -2496,8 +2496,8 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
-        tr = na.array([grid['x'][pointI].ravel(), \
+        dx = np.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
+        tr = np.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
                 grid["RadiusCode"][pointI].ravel(),
@@ -2533,7 +2533,7 @@
                 if self._generate_field(field):
                     continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
@@ -2545,21 +2545,21 @@
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
             # int64 -> float64 with the first real set of data
-            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
-                return na.array([f[i,:][pointI] for i in range(3)])
+                return np.array([f[i,:][pointI] for i in range(3)])
             if self._is_fully_enclosed(grid): return grid[field].ravel()
             return grid[field][pointI].ravel()
         if field in self.pf.field_info and self.pf.field_info[field].vector_field:
             pointI = self._get_point_indices(grid)
             f = grid[field]
-            return na.array([f[i,:][pointI] for i in range(3)])
+            return np.array([f[i,:][pointI] for i in range(3)])
         else:
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
-                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+                tr = tr * np.ones(grid.ActiveDimensions, dtype='float64')
             if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
                 and self._is_fully_enclosed(grid):
                 return tr.ravel()
@@ -2579,19 +2579,19 @@
             if grid.has_key(field):
                 new_field = grid[field]
             else:
-                new_field = na.ones(grid.ActiveDimensions, dtype=dtype) * default_val
+                new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
             new_field[pointI] = self[field][i:i+np]
             grid[field] = new_field
             i += np
 
     def _is_fully_enclosed(self, grid):
-        return na.all(self._get_cut_mask)
+        return np.all(self._get_cut_mask)
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _get_cut_particle_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -2600,9 +2600,9 @@
         return self._get_cut_mask(fake_grid)
 
     def _get_particle_indices(self, grid):
-        k = na.zeros(grid.NumberOfParticles, dtype='bool')
+        k = np.zeros(grid.NumberOfParticles, dtype='bool')
         k = (k | self._get_cut_particle_mask(grid))
-        return na.where(k)
+        return np.where(k)
 
     def cut_region(self, field_cuts):
         """
@@ -2705,16 +2705,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -2818,7 +2818,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -2835,10 +2835,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -2867,7 +2867,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -2951,36 +2951,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
 
@@ -2992,16 +2992,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -3018,7 +3018,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -3056,7 +3056,7 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -3076,35 +3076,35 @@
         within the cylinder will be selected.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._refresh_data()
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3115,13 +3115,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
 
@@ -3138,8 +3138,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
@@ -3150,11 +3150,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -3172,7 +3172,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
             
 
@@ -3185,7 +3185,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge, 
                               grid.RightEdge, grid.dds, pm, 0)
@@ -3228,7 +3228,7 @@
                                                            self.right_edge)
 
     def _is_fully_enclosed(self, grid):
-        return na.all( (grid._corners <= self.right_edge)
+        return np.all( (grid._corners <= self.right_edge)
                      & (grid._corners >= self.left_edge))
 
     @cache_mask
@@ -3282,10 +3282,10 @@
 
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
         self._refresh_data()
-        self.offsets = (na.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
+        self.offsets = (np.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
                         (self.pf.domain_right_edge -
                          self.pf.domain_left_edge)[:,None,None,None])\
                        .transpose().reshape(27,3) # cached and in order
@@ -3300,7 +3300,7 @@
                            self.left_edge[1]+off_y,self.left_edge[2]+off_z]
             region_right = [self.right_edge[0]+off_x,
                             self.right_edge[1]+off_y,self.right_edge[2]+off_z]
-            if (na.all((grid._corners <= region_right) &
+            if (np.all((grid._corners <= region_right) &
                        (grid._corners >= region_left))):
                 return True
         return False
@@ -3310,7 +3310,7 @@
         if self._is_fully_enclosed(grid):
             return True
         else:
-            cm = na.zeros(grid.ActiveDimensions,dtype='bool')
+            cm = np.zeros(grid.ActiveDimensions,dtype='bool')
             dxp, dyp, dzp = self._dx_pad * grid.dds
             for off_x, off_y, off_z in self.offsets:
                 cm = cm | ( (grid['x'] - dxp + off_x < self.right_edge[0])
@@ -3350,7 +3350,7 @@
         Child cells are not returned.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._grids = na.array(grid_list)
+        self._grids = np.array(grid_list)
         self.grid_list = self._grids
 
     def _get_list_of_grids(self):
@@ -3361,13 +3361,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class AMRMaxLevelCollection(AMR3DData):
@@ -3394,13 +3394,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 
@@ -3441,14 +3441,14 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = na.abs(grid._corners - self.center)
-        r = na.minimum(r, na.abs(self.DW[None,:]-r))
-        corner_radius = na.sqrt((r**2.0).sum(axis=1))
-        return na.all(corner_radius <= self.radius)
+        r = np.abs(grid._corners - self.center)
+        r = np.minimum(r, np.abs(self.DW[None,:]-r))
+        corner_radius = np.sqrt((r**2.0).sum(axis=1))
+        return np.all(corner_radius <= self.radius)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3477,7 +3477,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -3488,12 +3488,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -3505,7 +3505,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -3535,7 +3535,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -3545,18 +3545,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # each vrdote#_2 takes the product of the vr components with e#,
         # squares the results,
         # keeps the smallest of the three periodic cases,
         # and sums over the components
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
 
@@ -3572,21 +3572,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take non-cubic root-grid tiles into account
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
@@ -3627,22 +3627,22 @@
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        self.ActiveDimensions = np.array(dims,dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -3650,14 +3650,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields=None):
         if self._grids is None:
@@ -3677,7 +3677,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -3689,9 +3689,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
             
@@ -3737,7 +3737,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -3832,7 +3832,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -3846,35 +3846,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -3944,7 +3944,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
     
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -3969,7 +3969,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -3997,7 +3997,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -4054,13 +4054,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask


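The ellipsoid cut mask in the hunks above works axis by axis: take the
displacement from the center, replace it with the smallest-magnitude of its
three periodic images, accumulate its dot product with each of the
ellipsoid's eigenvectors, and finally test the standard ellipsoid
inequality.  A minimal standalone sketch of that test, assuming unit
eigenvectors e0/e1/e2, semi-axes A/B/C, and domain width DW (the helper
name is hypothetical, not part of the yt API):

    import numpy as np

    def ellipsoid_mask(pos, center, evecs, semi_axes, DW):
        # pos: (3, ...) coordinate arrays; evecs: (e0, e1, e2); DW: domain width
        dot_evec = np.zeros((3,) + pos.shape[1:])
        for i in range(3):
            ar = pos[i] - center[i]
            # three periodic images; keep the smallest-magnitude displacement
            case = np.array([ar, ar + DW[i], ar - DW[i]])
            vec = np.choose(np.abs(case).argmin(axis=0), case)
            # accumulate this axis' contribution to the dot product with each e#
            dot_evec += np.array([vec * evecs[0][i],
                                  vec * evecs[1][i],
                                  vec * evecs[2][i]])
        A, B, C = semi_axes
        return (dot_evec[0]**2 / A**2 +
                dot_evec[1]**2 / B**2 +
                dot_evec[2]**2 / C**2) <= 1.0
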
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -100,7 +100,7 @@
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): self.retvals[i].append(rv[i])
             g.clear_data()
-        self.retvals = [na.array(self.retvals[i]) for i in range(self.n_ret)]
+        self.retvals = [np.array(self.retvals[i]) for i in range(self.n_ret)]
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
@@ -110,7 +110,7 @@
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
@@ -185,7 +185,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
 
@@ -218,7 +218,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -249,9 +249,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -268,17 +268,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -292,11 +292,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -360,15 +360,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -380,7 +380,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -402,26 +402,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Fix the coverage to match to root grid cell left 
         # edges for making indexes.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -429,12 +429,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -447,9 +447,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = na.array([local_data["CellMass"][sel]], order='F')
+	    vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -484,7 +484,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -492,7 +492,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -569,7 +569,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -613,9 +613,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -644,14 +644,14 @@
     """
     ma, maxi, mx, my, mz, mg = -1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
         mg = data["GridIndices"][maxi]
     return (ma, maxi, mx, my, mz, mg)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 6)
@@ -663,14 +663,14 @@
     """
     ma, mini, mx, my, mz, mg = 1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
         mg = data["GridIndices"][mini]
     return (ma, mini, mx, my, mz, mg)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 6)
@@ -691,8 +691,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)


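Each derived quantity touched above is registered in two stages: a
per-chunk function that returns partial values for one batch of grids, and
a combine_function that reduces the collected arrays (one entry per batch,
and per processor when run through par_combine_object) into the final
answer.  A minimal sketch of that contract, modeled on the CenterOfMass
pair above (illustrative only, not the registered implementation):

    import numpy as np

    def _center_of_mass(data):
        # per-chunk stage: return only partial sums
        m = data["CellMassMsun"]
        return ((data["x"] * m).sum(), (data["y"] * m).sum(),
                (data["z"] * m).sum(), m.sum())

    def _comb_center_of_mass(data, x, y, z, den):
        # combine stage: x, y, z, den hold one partial sum per chunk
        return np.array([x.sum(), y.sum(), z.sum()]) / den.sum()

    # registration would then look like:
    # add_quantity("CenterOfMass", function=_center_of_mass,
    #              combine_function=_comb_center_of_mass, n_ret=4)
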
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -151,8 +151,8 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -161,8 +161,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -180,12 +180,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -215,13 +215,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
-            return na.random.random(3) * 1e-2
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
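
The field-detection machinery above relies on one trick: a derived-field definition is evaluated against a defaultdict that fabricates near-unit random data for any key and records each request, which is how the derived field list gets validated. A stripped-down sketch of the idea (hypothetical class name, not the yt class):

    import numpy as np
    from collections import defaultdict

    class FakeFieldData(defaultdict):
        # Fabricates near-unit random data for any requested field and
        # records which fields were asked for.
        def __init__(self, nd=16):
            self.requested = []
            # Ones plus a tiny perturbation keeps log/divide operations safe.
            defaultdict.__init__(self,
                lambda: np.ones((nd, nd, nd), dtype='float64')
                        + 1e-4 * np.random.random((nd, nd, nd)))

        def __missing__(self, key):
            self.requested.append(key)
            return defaultdict.__missing__(self, key)

    fake = FakeFieldData()
    fake["Density"] * fake["Temperature"]   # evaluate a derived field
    print(fake.requested)  # ['Density', 'Temperature']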


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -79,11 +79,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -184,15 +184,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -209,14 +209,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -237,9 +237,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -278,19 +278,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -369,8 +369,8 @@
     def __fill_child_mask(self, child, mask, tofill):
         rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -383,7 +383,7 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'int32')
+        self._child_mask = np.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
@@ -398,7 +398,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -425,8 +425,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -462,7 +462,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -474,9 +474,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -486,17 +486,17 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
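
For reference, the eight in-place np.add calls in get_vertex_centered_data implement a cell-to-vertex average: every vertex accumulates its eight neighboring cell values and the total is scaled by 0.125. A compact equivalent (a sketch; it skips the boundary extrapolation done above):

    import numpy as np

    def vertex_centered(cell_data):
        # cell_data: (nx, ny, nz) cell-centered values; result is
        # (nx+1, ny+1, nz+1) vertex-centered values.  Interior vertices get
        # the mean of their eight neighboring cells; boundary vertices are
        # undercounted here (the code above fixes them by extrapolation).
        nx, ny, nz = cell_data.shape
        vc = np.zeros((nx + 1, ny + 1, nz + 1), dtype='float64')
        lo, hi = slice(None, -1), slice(1, None)
        for sx in (lo, hi):
            for sy in (lo, hi):
                for sz in (lo, hi):
                    vc[sx, sy, sz] += cell_data  # one of the 8 offsets
        np.multiply(vc, 0.125, vc)  # in-place scale, as in the diff
        return vc

    print(vertex_centered(np.ones((4, 4, 4)))[2, 2, 2])  # 1.0 in the interior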


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle, pdb
 import weakref
 
@@ -116,11 +116,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _setup_classes(self, dd):
         # Called by subclass
@@ -172,7 +172,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -361,13 +361,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
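
The hierarchy keeps flat (num_grids, 3) arrays rather than per-grid attributes, so level statistics reduce to boolean indexing, as in _compute_level_stats above. A small worked example with made-up grid data:

    import numpy as np

    # Made-up hierarchy arrays, shaped like the allocations above.
    grid_levels = np.array([[0], [0], [1], [1], [2]], dtype='int32')
    grid_dimensions = np.array([[16, 16, 16], [16, 16, 16],
                                [32, 32, 32], [32, 32, 32],
                                [64, 64, 64]], dtype='int32')

    for level in range(int(grid_levels.max()) + 1):
        li = (grid_levels[:, 0] == level)
        numgrids = int(np.sum(li))
        numcells = grid_dimensions[li, :].prod(axis=1).sum()
        print("level %d: %d grids, %d cells" % (level, numgrids, numcells))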


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -70,18 +70,18 @@
         max_val, maxi, mx, my, mz, mg = \
             source.quantities["MaxLocation"]( field, lazy_reader=True)
         max_grid = self.grids[mg]
-        mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
+        mc = np.unravel_index(maxi, max_grid.ActiveDimensions)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
               max_val, mx, my, mz, max_grid, max_grid.Level, mc)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_grid, mc, max_val, na.array((mx,my,mz), dtype='float64')
+        return max_grid, mc, max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -90,7 +90,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -103,11 +103,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -134,7 +134,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -151,15 +151,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -167,29 +167,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -203,26 +203,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -237,5 +237,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
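
The np.choose idiom above predates boolean fancy indexing; the same point-in-grid test can be written directly as comparisons. An equivalent formulation (a sketch, not the yt method):

    import numpy as np

    def find_point_grids(grid_left_edge, grid_right_edge, coord):
        # A grid contains the point when left <= coord < right on every
        # axis, matching the choose/greater logic above.
        coord = np.asarray(coord, dtype='float64')
        contains = np.all((grid_left_edge <= coord) &
                          (grid_right_edge > coord), axis=1)
        return np.where(contains)[0]

    gle = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    gre = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    print(find_point_grids(gle, gre, [0.75, 0.75, 0.75]))  # [1]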
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
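
One detail worth noting in the conversion-factor code above: np.fromiter with an explicit count materializes one scalar per grid straight into a float64 array, skipping an intermediate list. A toy example with hypothetical grid records standing in for func(g):

    import numpy as np

    grids = [{'dx': 0.5}, {'dx': 0.25}, {'dx': 0.125}]
    conv = np.fromiter((g['dx'] ** 3 for g in grids),
                       count=len(grids), dtype='float64')
    print(conv)  # [0.125  0.015625  0.00195312]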
         


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -115,13 +115,13 @@
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = np.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -131,7 +131,7 @@
         for key in self.__data:
             my_mean[key] = self._get_empty_field()
             my_weight[key] = self._get_empty_field()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for key in self.__data:
             my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
             my_weight[key][ub] = self.__weight_data[key][ub]
@@ -151,7 +151,7 @@
                                          accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
-                q[u] = na.sqrt(q[u] / w[u])
+                q[u] = np.sqrt(q[u] / w[u])
             self[field] = f
             self["%s_std" % field] = q
         self["UsedBins"] = u
@@ -202,7 +202,7 @@
                 else:
                     pointI = self._data_source._get_point_indices(source)
             data.append(source[field][pointI].ravel().astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -235,10 +235,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -253,7 +253,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -263,7 +263,7 @@
         # (i.e., lazy_reader)
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -282,7 +282,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -293,7 +293,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -301,9 +301,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -319,7 +319,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -337,11 +337,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -409,18 +409,18 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -428,7 +428,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -436,7 +436,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -456,9 +456,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -470,9 +470,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -481,11 +481,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -507,8 +507,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -531,7 +531,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -540,7 +540,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -579,7 +579,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -599,7 +599,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -617,9 +617,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -656,24 +656,24 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -681,7 +681,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -689,9 +689,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -711,11 +711,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -727,7 +727,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -741,13 +741,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -772,9 +772,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -853,7 +853,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
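
The binning scheme in these profiles is: clip samples to the bin range, map them to bins with np.digitize, accumulate weighted sums per bin, and divide the weights out at the end. A pure-NumPy sketch of a 1D weighted profile (np.bincount stands in for yt's binning loop):

    import numpy as np

    def binned_profile(x, values, weights, bins):
        # Keep samples strictly inside the outer bin edges, as above.
        mi = (x > bins.min()) & (x < bins.max())
        bi = np.digitize(x[mi], bins) - 1   # bin index per sample
        n = bins.size - 1
        wsum = np.bincount(bi, weights=weights[mi], minlength=n)
        vsum = np.bincount(bi, weights=(values * weights)[mi], minlength=n)
        used = wsum > 0
        out = np.zeros(n, dtype='float64')
        out[used] = vsum[used] / wsum[used]  # divide the weight out last
        return out, used

    x = np.random.random(1000)
    prof, used = binned_profile(x, x ** 2, np.ones_like(x),
                                np.linspace(0.0, 1.0, 11))
    print(prof.round(3))  # ten weighted means, rising roughly as x**2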
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,7 +26,7 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
@@ -61,66 +61,66 @@
 
 def _dx(field, data):
     return data.dds[0]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
     return data.dds[1]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
     return data.dds[2]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           validators=[ValidateSpatial(0)],
           projection_conversion="unitary",
@@ -130,7 +130,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -139,7 +139,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -157,7 +157,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -169,7 +169,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -181,7 +181,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
              (data["y-velocity"]-bulk_velocity[1])**2.0 + \
              (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -189,13 +189,13 @@
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
 def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
+    return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
 add_field("TangentialVelocity", 
           function=_TangentialVelocity,
@@ -223,14 +223,14 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
     ## The spherical coordinates radius is simply the magnitude of the
     ## coords vector.
 
-    return na.sqrt(na.sum(coords**2,axis=-1))
+    return np.sqrt(np.sum(coords**2,axis=-1))
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,7 +245,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -254,11 +254,11 @@
     ## vector.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JdotCoords = na.sum(J*coords,axis=-1)
+    JdotCoords = np.sum(J*coords,axis=-1)
     
-    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,7 +269,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
     
@@ -282,18 +282,18 @@
     ## The angle is then given by the arctan of the ratio of the
     ## yprime-component and the xprime-component of the coords vector.
 
-    xprime = na.cross([0.0,1.0,0.0],normal)
-    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
-    yprime = na.cross(normal,xprime)
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = na.tile(xprime,tile_shape)
-    Jy = na.tile(yprime,tile_shape)
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
     
-    Px = na.sum(Jx*coords,axis=-1)
-    Py = na.sum(Jy*coords,axis=-1)
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
     
-    return na.arctan2(Py,Px)
+    return np.arctan2(Py,Px)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -305,7 +305,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -313,10 +313,10 @@
     ## gives a vector of magnitude equal to the cylindrical radius.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JcrossCoords = na.cross(J,coords)
-    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -331,7 +331,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -339,9 +339,9 @@
     ## the cylindrical height.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    return na.sum(J*coords,axis=-1)  
+    return np.sum(J*coords,axis=-1)  
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -399,7 +399,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -502,7 +502,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -520,7 +520,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -579,15 +579,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Paste the averaged values back into a full-sized array; the border stays zero
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -615,7 +615,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
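
The DivV stencil here is a second-order central difference, dvx/dx ~ (vx[i+1] - vx[i-1])/(2*dx), accumulated over the three axes into the interior of a zero-padded array. One axis of it, stripped of the yt machinery:

    import numpy as np

    vx = np.random.random((8, 8, 8))   # stand-in for data["x-velocity"]
    dx, div_fac = 0.1, 2.0
    sl_left, sl_right = slice(None, -2), slice(2, None)
    div = np.zeros_like(vx)
    # Central difference along x, written into the interior cells only.
    div[1:-1, 1:-1, 1:-1] = (vx[sl_right, 1:-1, 1:-1] -
                             vx[sl_left, 1:-1, 1:-1]) / (div_fac * dx)
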
@@ -627,12 +627,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -642,7 +642,7 @@
 def obtain_velocities(data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["x-velocity"] - bv[0]
     yv = data["y-velocity"] - bv[1]
     zv = data["z-velocity"] - bv[2]
@@ -694,18 +694,18 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
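
The particle angular momentum above is just L = r x v per particle after removing the bulk velocity; the reshape broadcasts the 3-vector center against a (3, N) coordinate array. An equivalent standalone sketch with synthetic particles:

    import numpy as np

    pos = np.random.random((3, 100))    # (3, N) particle positions
    vel = np.random.random((3, 100))    # (3, N) particle velocities
    center = np.array([0.5, 0.5, 0.5])
    bulk = np.zeros(3)
    r_vec = pos - center.reshape((3, 1))   # displacement from the center
    v_vec = vel - bulk.reshape((3, 1))     # velocity relative to the bulk
    L = np.cross(r_vec, v_vec, axis=0)     # (3, N) specific angular momenta
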
@@ -720,7 +720,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -730,7 +730,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -740,7 +740,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -788,20 +788,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
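
Both radius fields apply the minimum-image convention per axis: the separation is min(|x - c|, |L - |x - c||), so a point just inside one edge of a periodic box is close to a center just inside the opposite edge. In isolation, for one axis of a unit box:

    import numpy as np

    x = np.array([0.05, 0.5, 0.95])    # positions along one axis
    c, L = 0.1, 1.0                    # center coordinate, domain width
    r = np.abs(x - c)
    r = np.minimum(r, np.abs(L - r))   # wrap through the periodic boundary
    print(r)                           # [0.05  0.4   0.15]
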
@@ -886,16 +886,16 @@
     center = data.get_field_parameter("center")
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity is None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
                 + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
                 + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
                 )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
+    if np.any(np.isnan(new_field)): # NaNs appear where center coincides with a point
+        new_field[np.isnan(new_field)] = 0.0
     return new_field
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
     return 1e-5
 add_field("RadialVelocity", function=_RadialVelocity,
@@ -916,10 +916,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity is None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -929,10 +929,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity is None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -955,16 +955,16 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
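
CICDeposit_3 is a compiled yt kernel; conceptually it spreads each particle's mass over the neighboring cells with linear (cloud-in-cell) weights. A one-dimensional version of the weighting, pure numpy and entirely illustrative:

    import numpy as np

    def cic_deposit_1d(x, mass, nbins, left, dx):
        f = (x - left) / dx - 0.5          # position in units of cell centers
        i = np.floor(f).astype(int)        # left-hand cell of each particle
        w = f - i                          # linear weight toward the right cell
        field = np.zeros(nbins)
        # Edge particles are simply clipped onto the boundary cells here.
        np.add.at(field, np.clip(i,     0, nbins - 1), mass * (1.0 - w))
        np.add.at(field, np.clip(i + 1, 0, nbins - 1), mass * w)
        return field / dx                  # mass per length, i.e. a 1-D density

    print(cic_deposit_1d(np.array([0.34]), np.array([1.0]), 10, 0.0, 0.1))
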
@@ -993,7 +993,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -1018,7 +1018,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -1038,7 +1038,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -1053,7 +1053,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -1068,7 +1068,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -1083,7 +1083,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -1100,7 +1100,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -1115,7 +1115,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -1130,7 +1130,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1145,7 +1145,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1171,7 +1171,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1189,7 +1189,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1207,7 +1207,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1225,7 +1225,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1244,7 +1244,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1263,7 +1263,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1285,13 +1285,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1299,7 +1299,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1311,7 +1311,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1344,7 +1344,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1369,13 +1369,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityRPGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1383,7 +1383,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1396,7 +1396,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -106,7 +106,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -120,10 +120,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -141,7 +141,7 @@
         #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
         
@@ -180,9 +180,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -192,10 +192,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -224,8 +224,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
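
The base_level expression above is an integer log2 of the root-grid dimension written via log10s; note that the truncating int() makes the log10 form sensitive to round-off at exact powers of two, where np.log2 is exact. For an illustrative 128^3 root grid:

    import numpy as np

    dims_max = 128                     # illustrative root-grid dimension
    base_level = int(np.log10(dims_max) / np.log10(2))
    # np.log2 is the direct form and exact at powers of two.
    print(base_level, int(np.log2(dims_max)))
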
@@ -234,7 +234,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -260,15 +260,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -298,8 +298,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -345,14 +345,14 @@
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    props*np.array(correction).astype('int64')))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -372,7 +372,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -388,13 +388,13 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(npb-npa,dtype='uint8') # count renamed: 'np' now shadows numpy
+            self.pf.particle_mass         = np.zeros(npb-npa,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(npb-npa,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(npb-npa,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(npb-npa,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(npb-npa,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(npb-npa,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -461,17 +461,17 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
             if type(self.pf.grid_particles) == type(5):
                 particle_level = min(self.pf.max_level,self.pf.grid_particles)
             else:
                 particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    np.zeros(len(pos[:,0])).astype('int32')-1,
                     particle_level, #don't grid particles past this level
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
@@ -500,10 +500,10 @@
             
 
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -519,7 +519,7 @@
         self.max_level = self.grid_levels.max()
 
     # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     mask = np.empty(self.grids.size, dtype='int32')
     #     pb = get_pbar("Populating grids", len(self.grids))
     #     for gi,g in enumerate(self.grids):
     #         pb.update(gi)
@@ -609,7 +609,7 @@
         self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -685,7 +685,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -696,7 +696,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -730,8 +730,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -812,10 +812,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -824,7 +824,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -836,7 +836,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -857,10 +857,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -927,8 +927,8 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,7 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -178,7 +178,7 @@
     di = dd==0.0
     #dd[di] = -1.0
     tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
+    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
     #    import pdb;pdb.set_trace()
     tr /= data.pf.conversion_factors["GasEnergy"]
@@ -186,7 +186,7 @@
     tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     x = data.pf.conversion_factors["Temperature"]
@@ -258,9 +258,9 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     tr  = data["Ones"] #create a grid of the right size
-    if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+    if np.sum(idx)>0:
+        tr /= np.prod(tr.shape) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contained mass
         return tr
     else:
         return tr*0.0


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # idc, iOctCh, 2 vars, 2 pads
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = np.array(grid.particle_indices)
         if field == 'particle_index':
-            return na.array(idx)
+            return np.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -168,10 +168,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -198,9 +198,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -242,20 +242,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -272,12 +272,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -294,7 +294,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -309,9 +309,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
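
read_particles above decodes a paged layout: each page holds `words` field rows (x, y, z, vx, vy, vz) of np_per_page particles, so the pages are split apart, restacked along the particle axis, and transposed. The reshaping on toy numbers:

    import numpy as np

    num_pages, words, np_per_page = 3, 6, 4
    raw = np.arange(num_pages * words * np_per_page, dtype='float32')
    pages = np.vsplit(raw.reshape(num_pages, words, np_per_page), num_pages)
    data = np.squeeze(np.dstack(pages)).T   # (particles, fields)
    print(data.shape)                       # (12, 6): one x,y,z,vx,vy,vz row each
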
@@ -332,8 +332,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # idc, iOctCh, 2 vars, 2 pads
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -342,9 +342,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -354,12 +354,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -372,8 +372,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
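
_read_frecord handles Fortran unformatted sequential records: each record is framed by a 4-byte big-endian byte count before and after the payload, and the two counts must agree. A round-trip through one such record (pure Python/numpy, no ART file needed):

    import struct
    import numpy as np
    from io import BytesIO

    payload = np.arange(5, dtype='>f4')
    marker = struct.pack('>i', payload.nbytes)
    buf = BytesIO(marker + payload.tobytes() + marker)  # Fortran-style framing

    s1 = struct.unpack('>i', buf.read(4))[0]
    ss = np.frombuffer(buf.read(s1), dtype='>f4')
    s2 = struct.unpack('>i', buf.read(4))[0]
    assert s1 == s2 and np.all(ss == payload)
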
@@ -406,14 +406,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -423,9 +423,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
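
quad() above is trapezoidal integration on log-spaced abscissae, which suits the smooth power-law-like Friedmann integrands it is applied to. Checking the same recipe against a known integral (the integral of 1/x from 1 to e is exactly 1):

    import numpy as np

    def quad(fintegrand, xmin, xmax, n=10000):
        spacings = np.logspace(np.log10(xmin), np.log10(xmax), n)
        integrand_arr = fintegrand(spacings)
        # dx may be an array of per-interval widths, as the code above relies on.
        return np.trapz(integrand_arr, dx=np.diff(spacings))

    print(quad(lambda x: 1.0 / x, 1.0, np.e))   # ~1.0
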
@@ -450,14 +450,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): 
@@ -465,14 +465,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -109,7 +109,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -174,12 +174,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +187,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +273,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +296,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +311,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +347,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +367,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +384,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +405,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +424,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +439,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +473,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +620,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


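The Castro reader above seeks to one field's offset inside a FAB record and undoes the Fortran (column-major) ordering with reshape + swapaxes. A self-contained sketch, assuming little-endian reals and a 3D grid; the function name and layout details are illustrative:

    import numpy as np

    def read_fab_field(f, dims, field_index, bytes_per_real=8):
        # Relative seek past the fields stored before the one we want.
        n_elements = int(np.prod(dims))
        f.seek(n_elements * bytes_per_real * field_index, 1)
        field = np.fromfile(f, count=n_elements,
                            dtype='<f%i' % bytes_per_real)
        # Column-major on disk: reshape reversed, then swap the outer
        # axes to get C-ordered (x, y, z), as the hunk above does.
        return field.reshape(dims[::-1]).swapaxes(0, 2)

Note in passing that the start/stop parsing relies on Python 2 semantics: under Python 3, np.array(map(int, start.split(','))) would wrap the map iterator in a zero-dimensional object array, since map no longer returns a list.
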
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,10 +81,10 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -97,7 +97,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -137,18 +137,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
@@ -182,8 +182,8 @@
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,9 +193,9 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = na.array(self.grids, dtype='object')
+#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -212,7 +212,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -316,21 +316,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod


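The np.choose/np.greater loop above flags every grid whose box contains a given particle. An equivalent boolean-mask formulation (boundary cases aside), with invented edge arrays, may make the geometry easier to see:

    import numpy as np

    # Two hypothetical grids and one particle position.
    grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = np.array([0.75, 0.6, 0.9])

    # A grid contains the particle when left <= coord <= right on
    # every axis; broadcasting compares all grids at once.
    mask = np.all((grid_left_edge <= coord) & (coord <= grid_right_edge),
                  axis=1)
    ind = np.where(mask)[0]          # -> array([1])
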
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -131,7 +131,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
@@ -90,7 +90,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +179,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(AMRHierarchy):
@@ -291,7 +291,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
@@ -304,29 +304,29 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        self._fill_arrays(ei, si, LE, RE, npart)
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
 
     def __pointer_handler(self, m):
         sgi = int(m[2])-1
@@ -379,7 +379,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -408,9 +408,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -425,7 +425,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -477,20 +477,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -518,7 +518,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -527,7 +527,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -590,7 +590,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -601,7 +601,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -638,18 +638,18 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
     def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
@@ -662,7 +662,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
@@ -707,17 +707,17 @@
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self.domain_left_edge, [0.0]])
+            np.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self.domain_right_edge, [1.0]])
+            np.concatenate([self.domain_right_edge, [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -810,7 +810,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
@@ -825,17 +825,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -925,7 +925,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -937,8 +937,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -978,7 +978,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -986,7 +986,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():


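The one non-mechanical change in the Enzo hierarchy diff above is the rename of the local particle-count list from np to npart in _parse_hierarchy and _fill_arrays: once numpy is imported as np, a local of the same name shadows the module. A minimal sketch of the failure mode (function names invented):

    import numpy as np

    def fill_arrays_shadowed(si, np):    # parameter shadows the module
        return np.array(si)              # np is a plain list here

    def fill_arrays_renamed(si, npart):  # the rename used in the hunks above
        return np.array(si), npart       # np refers to numpy again

    try:
        fill_arrays_shadowed([1, 2], [5, 5])
    except AttributeError as err:
        print(err)        # 'list' object has no attribute 'array'
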
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -193,7 +193,7 @@
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -249,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -324,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
@@ -367,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -406,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -466,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -485,7 +485,7 @@
 def _Bmag(field, data):
     """ magnitude of bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
 add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
 
@@ -495,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -555,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -584,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -606,7 +606,7 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
@@ -637,7 +637,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)


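The weighted-deposit fields above (_cic_particle_field and its star-filtered variant) fill two buffers, mass*value and mass, then divide where the mass buffer is nonzero. A pure-numpy stand-in for the Cython amr_utils.CICDeposit_3 kernel, using nearest-grid-point binning via np.histogramdd instead of true cloud-in-cell weights, so only the top/bottom weighting pattern matches:

    import numpy as np

    def mass_weighted_deposit(pos, mass, values, left_edge, dims, dx):
        # Uniform bin edges; histogramdd performs the deposit.
        edges = [np.linspace(left_edge[i], left_edge[i] + dims[i] * dx,
                             dims[i] + 1) for i in range(3)]
        top, _ = np.histogramdd(pos, bins=edges, weights=mass * values)
        bottom, _ = np.histogramdd(pos, bins=edges, weights=mass)
        out = np.zeros(dims)
        nz = bottom > 0                  # leave empty cells at zero
        out[nz] = top[nz] / bottom[nz]
        return out

    # Four particles of unit mass on a 4^3 grid covering [0, 1)^3.
    pos = np.random.random((4, 3))
    field = mass_weighted_deposit(pos, np.ones(4), np.arange(4.0),
                                  np.zeros(3), (4, 4, 4), 0.25)
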
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['Refineby'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
 


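The time-range selection above brackets the requested interval against the sorted output times with np.digitize, then widens the left index when the initial time is an exact hit. A compact sketch with invented times:

    import numpy as np

    my_times = np.array([0.0, 0.1, 0.2, 0.3, 0.4])
    lo, hi = np.digitize([0.1, 0.35], my_times)
    if 0.1 == my_times[lo - 1]:
        lo -= 1                      # include an exact match on the left
    selected = my_times[lo:hi]       # -> array([0.1, 0.2, 0.3])
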
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.funcs import *
@@ -70,7 +70,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -123,14 +123,14 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
@@ -139,20 +139,20 @@
         rdx = (self.parameter_file.domain_width /
                 self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.ones((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
             dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
+            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
@@ -364,9 +364,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -392,7 +392,7 @@
         nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:


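The FLASH hierarchy above turns per-grid particle counts into a cumulative offset table with np.add.accumulate, writing the running totals one slot to the right so that the first entry stays zero. The particle reader that follows slices with it; a standalone sketch with invented counts:

    import numpy as np

    counts = np.array([3, 0, 5, 2])              # particles per grid
    offsets = np.zeros(counts.size + 1, dtype='int64')
    np.add.accumulate(counts, out=offsets[1:])   # -> [0, 3, 3, 8, 10]

    # Grid i's particles occupy the slice offsets[i]:offsets[i+1];
    # this is the start/end lookup in _read_data_set below.
    start, end = offsets[2], offsets[3]          # grid 2 -> particles 3:8
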
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -54,7 +54,7 @@
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(len(args), dtype='object')
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
         


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -71,7 +71,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +108,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +125,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -147,7 +147,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]


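The GDF hierarchy above reconstructs grid edges from integer left indices, grid dimensions, and a per-level cell width. A worked sketch of that arithmetic with invented values (the active_dims handling for degenerate dimensions is omitted here):

    import numpy as np

    domain_left_edge  = np.zeros(3)
    domain_right_edge = np.ones(3)
    domain_dimensions = np.array([8, 8, 8])
    refine_by = 2

    glis   = np.array([[0, 0, 0], [8, 4, 4]])    # grid_left_index
    gdims  = np.array([[8, 8, 8], [8, 8, 8]])    # grid_dimensions
    levels = np.array([0, 1])

    dx = (domain_right_edge - domain_left_edge) / domain_dimensions
    dx = dx / refine_by ** levels[:, None]        # one cell width per level
    grid_left_edge  = domain_left_edge + dx * glis
    grid_right_edge = grid_left_edge + dx * gdims
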
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -110,7 +110,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -170,9 +170,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +181,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +259,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +304,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +325,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +354,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +367,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +381,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +494,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0


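The hunks above and below are one mechanical change repeated across the tree: yt's historical `import numpy as na` alias becomes the conventional `np`. A rename this repetitive is normally scripted rather than typed; a minimal sketch of such a pass (the tree walk and regexes are illustrative, not the actual commands behind these commits, and `\bna\.` would still want a review pass for matches inside strings or comments):

    import os
    import re

    alias_use = re.compile(r"\bna\.")                     # na used as module alias
    alias_import = re.compile(r"^import numpy as na\b", re.M)

    for root, dirs, files in os.walk("yt"):
        for fn in files:
            if not fn.endswith(".py"):
                continue
            path = os.path.join(root, fn)
            src = open(path).read()
            new = alias_import.sub("import numpy as np",
                                   alias_use.sub("np.", src))
            if new != src:
                open(path, "w").write(new)
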
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


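One thing worth noting in the hunks above: `np.array(map(int, ...))` relies on Python 2's `map` returning a list; under Python 3 the same line would wrap a map iterator in a 0-d object array. A version-proof spelling of the FAB extent parsing (a sketch; the "i,j,k" corner strings follow the BoxLib convention visible above):

    import numpy as np

    def parse_extent(start, stop):
        # BoxLib "i,j,k" corner strings -> integer index arrays and a cell count.
        lo = np.array([int(v) for v in start.split(",")])
        hi = np.array([int(v) for v in stop.split(",")])
        return lo, hi, hi - lo + 1        # bounds are inclusive

    lo, hi, dims = parse_extent("0,0,0", "15,15,15")
    assert (dims == 16).all()
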
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -108,7 +108,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -172,20 +172,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +269,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +290,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +304,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +341,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +361,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +378,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +398,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +415,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +444,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +464,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +607,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
 


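The nyx hierarchy assembles its flat per-grid arrays by concatenating per-level lists, as the `_populate_grid_objects` hunks show. The pattern reduced to a standalone sketch (the `Level` class here is a stand-in, not yt's real API):

    import numpy as np

    class Level(object):
        def __init__(self, level, ngrids):
            self.level, self.ngrids = level, ngrids

    levels = [Level(0, 4), Level(1, 6), Level(2, 2)]
    num_grids = sum(l.ngrids for l in levels)

    # Repeat each level number once per grid, then reshape to the (N, 1)
    # column layout the hierarchy arrays use.
    gls = np.concatenate([l.ngrids * [l.level] for l in levels])
    grid_levels = gls.reshape((num_grids, 1))
    assert grid_levels.shape == (12, 1) and grid_levels[4, 0] == 1
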
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


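The nyx reader pre-allocates the output buffer and reshapes with order='F' because BoxLib FABs store the x index fastest on disk. The round trip that relies on, in isolation:

    import numpy as np

    dims = (4, 3, 2)                                   # ActiveDimensions
    flat = np.arange(np.prod(dims), dtype="float64")   # values as read from disk
    field = flat.reshape(dims, order="F")              # x index varies fastest
    assert field[1, 0, 0] == 1.0                       # x-neighbors adjacent on disk
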
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -107,7 +107,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -139,7 +139,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +160,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +211,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +302,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +347,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +368,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +399,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +413,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +551,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):


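The orion particle count above assigns each particle to the finest grid whose box brackets its coordinate; the `np.choose`/`np.greater` pair is an older spelling of a plain boolean comparison. The same selection written directly (edges and levels here are made up for illustration):

    import numpy as np

    grid_left = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right = np.array([[1.0, 1.0, 1.0], [0.75, 0.75, 0.75]])
    grid_level = np.array([0, 1])
    coord = np.array([0.6, 0.6, 0.6])

    # A grid contains the particle if its box brackets it on all three axes.
    mask = ((grid_left < coord) & (coord < grid_right)).all(axis=1)
    candidates = np.where(mask)[0]
    finest = candidates[np.argmax(grid_level[candidates])]
    assert finest == 1                     # the level-1 grid wins
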
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


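When a grid holds no particles, `_Particles` returns an empty but correctly typed array, which keeps downstream concatenations and reductions working without special cases. The behavior it leans on:

    import numpy as np

    empty = np.array([], dtype="float64")
    filled = np.array([1.0, 2.0])
    # Concatenating an empty array of matching dtype is a harmless no-op.
    assert np.concatenate([empty, filled]).tolist() == [1.0, 2.0]
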
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 
@@ -79,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -93,10 +93,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -116,7 +116,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.tree_proxy = pf.ramses_tree
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -153,12 +153,12 @@
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         level_info = self.tree_proxy.count_zones()
         num_ogrids = sum(level_info)
-        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        ogrid_hilbert_indices = na.zeros(num_ogrids, dtype='uint64')
-        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        ogrid_left_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = np.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = np.zeros((num_ogrids,6), dtype='int64')
+        ogrid_hilbert_indices = np.zeros(num_ogrids, dtype='uint64')
+        ochild_masks = np.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
             self.pf.domain_dimensions,
             ogrid_left_edge, ogrid_right_edge,
@@ -180,7 +180,7 @@
             if level_info[level] == 0: continue
             # Get the indices of grids on this level
             ggi = (ogrid_levels == level).ravel()
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2 
+            dims = np.ones((ggi.sum(), 3), dtype='int64') * 2 
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             fl = ogrid_file_locations[ggi,:]
@@ -189,7 +189,7 @@
             # We want grids that cover no more than MAX_EDGE cells in every direction
             psgs = []
             # left_index is integers of the index, with respect to this level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
+            left_index = np.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
@@ -203,18 +203,18 @@
             #print level, hilbert_indices.min(), hilbert_indices.max()
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
             for ddleft_index, ddfl in zip(lefts, locs):
-                for idomain in na.unique(ddfl[:,0]):
+                for idomain in np.unique(ddfl[:,0]):
                     dom_ind = ddfl[:,0] == idomain
                     dleft_index = ddleft_index[dom_ind,:]
                     dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                     psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                     dleft_index, dfl)
                     if psg.efficiency <= 0: continue
@@ -226,12 +226,12 @@
             pbar.finish()
             self.proto_grids.append(psgs)
             print sum(len(psg.grid_file_locations) for psg in psgs)
-            sums = na.zeros(3, dtype='int64')
+            sums = np.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             #for g in self.proto_grids[level]:
             #    sums += [s.sum() for s in g.sigs]
-            #assert(na.all(sums == dims.prod(axis=1).sum()))
+            #assert(np.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     def _parse_hierarchy(self):
@@ -251,11 +251,11 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.proto_grids = []
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[gi,:],
@@ -346,10 +346,10 @@
         rheader = self.ramses_tree.get_file_info()
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_right_edge = na.ones(3, dtype='float64') \
+        self.domain_right_edge = np.ones(3, dtype='float64') \
                                            * rheader['boxlen']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * 2
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")


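The RAMSES re-gridding groups octs by Hilbert index and then takes the bounding index box of each group; the `+2` appears because every oct spans two cells per axis. The index arithmetic in isolation (oct positions are illustrative):

    import numpy as np

    # Integer left indices of four octs on one level.
    left_index = np.array([[0, 0, 0], [2, 0, 0], [2, 2, 0], [4, 4, 2]])

    initial_left = np.min(left_index, axis=0)
    # Each oct spans two cells per axis, hence the +2 on the far edge.
    idims = (np.max(left_index, axis=0) - initial_left).ravel() + 2
    assert list(idims) == [6, 6, 4]
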
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,8 +38,8 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
 
     def _read_data_set(self, grid, field):
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0


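`_read_data_set` in the RAMSES reader fills a grid from progressively coarser levels, using an integer `filled` mask and a `to_fill` countdown to know when to stop. The bookkeeping pattern, sketched without any file I/O:

    import numpy as np

    dims = (4, 4, 4)
    tr = np.zeros(dims, dtype="float64")       # the data being assembled
    filled = np.zeros(dims, dtype="int32")     # 1 where some level supplied data
    to_fill = tr.size

    # Pretend one pass filled the lower half of the grid.
    newly = (filled == 0)
    newly[2:] = False
    tr[newly] = 1.0
    filled[newly] = 1
    to_fill -= int(newly.sum())
    assert to_fill == 32                       # half the 64 cells remain
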
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -73,7 +73,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -181,7 +181,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -192,7 +192,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -200,7 +200,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -209,7 +209,7 @@
 
     def _initialize_grid_arrays(self):
         AMRHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def save_data(self, *args, **kwargs):
         pass
@@ -225,7 +225,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -330,19 +330,19 @@
     Examples
     --------
 
-    >>> arr = na.random.random((128, 128, 129))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> bbox = na.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
     >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
     """
 
-    domain_dimensions = na.array(domain_dimensions)
+    domain_dimensions = np.array(domain_dimensions)
     if bbox is None:
-        bbox = na.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
-    domain_left_edge = na.array(bbox[:, 0], 'float64')
-    domain_right_edge = na.array(bbox[:, 1], 'float64')
-    grid_levels = na.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
 
@@ -350,10 +350,10 @@
         temp = {}
         new_data = {}
         for key in data.keys():
-            psize = get_psize(na.array(data[key].shape), nprocs)
+            psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
-            grid_dimensions = na.array([grid.shape for grid in temp[key]])
+            grid_dimensions = np.array([grid.shape for grid in temp[key]])
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -371,9 +371,9 @@
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        -na.ones(nprocs, dtype='int64'),
-        number_of_particles*na.ones(nprocs, dtype='int64').reshape(nprocs,1),
-        na.zeros(nprocs).reshape((nprocs,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 


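Beyond the alias rename, the stream hunks show `load_uniform_grid` decomposing an in-memory array into `nprocs` bricks via `get_psize`/`decompose_array`. A one-axis stand-in for that split (yt's version splits along all three axes and tracks the brick bounding boxes):

    import numpy as np

    arr = np.random.random((8, 8, 8))
    nprocs = 4
    bricks = np.array_split(arr, nprocs, axis=0)   # four (2, 8, 8) slabs
    assert sum(b.shape[0] for b in bricks) == arr.shape[0]
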
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1


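The tiger frontend lays out its unigrid tiles with `np.mgrid`, using the complex-step form in which `n*1j` means n evenly spaced samples with both endpoints included. That convention by itself:

    import numpy as np

    edges = np.mgrid[0.0:0.75:4j]     # 4 samples, both endpoints included
    assert np.allclose(edges, [0.0, 0.25, 0.5, 0.75])
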
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")


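`_read_data_slice` above reuses the section reader by collapsing one axis to a single cell: shift the left corner to the requested plane, set the section depth to 1. The index arithmetic:

    import numpy as np

    LD = np.array([0, 0, 0], dtype="int64")      # tile's left corner
    SS = np.array([16, 16, 16], dtype="int64")   # section size to read
    axis, coord = 2, 5
    LD[axis] += coord      # jump to the requested plane
    SS[axis] = 1           # and read exactly one cell deep
    assert list(SS) == [16, 16, 1] and LD[2] == 5
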
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):


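The MIP viewer fills its vertex buffers with `np.fromiter`, which sizes the array up front from `count` instead of buffering a Python list first. The idiom, with a hypothetical corner generator:

    import numpy as np

    def corners():
        for i in range(8):                     # a cube's eight corners
            for c in (i & 1, (i >> 1) & 1, (i >> 2) & 1):
                yield float(c)

    v = np.fromiter(corners(), dtype="float32", count=8 * 3)
    assert v.shape == (24,) and v[3] == 1.0    # corner 1 starts at (1, 0, 0)
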
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


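The grid viewer uploads log10(Density) textures and tracks a running min/max for normalization; the commented-out line in the source hints at an 8-bit rescale. That rescale, sketched on random data:

    import numpy as np

    density = np.random.random((16, 16, 16)).astype("float32") + 1e-3
    upload = np.log10(density).copy()          # contiguous copy for the GL upload
    mi, ma = upload.min(), upload.max()
    # Rescale to 8-bit, as the commented-out line in the source suggests.
    as_bytes = (255 * (upload - mi) / (ma - mi)).astype("uint8")
    assert as_bytes.min() == 0 and as_bytes.max() == 255
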
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)


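The payload handler flattens any ndarray payload to raw bytes with `tostring()` before delivery (`tostring`/`fromstring` are the era's names; modern NumPy spells them `tobytes`/`frombuffer`). The round trip:

    import numpy as np

    a = np.arange(4, dtype="float64")
    raw = a.tostring()                        # raw bytes of the array
    b = np.fromstring(raw, dtype="float64")   # the era's inverse operation
    assert (a == b).all()
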
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)


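The pannable map log-scales both the color bounds and the tile data when the field is logged, so the colormap stays linear in log space. What `apply_colormap` effectively sees:

    import numpy as np

    frb = np.random.random((4, 4)) * 1.0e3 + 1.0   # strictly positive field
    cmi, cma = frb.min(), frb.max()
    cmi, cma = np.log10(cmi), np.log10(cma)
    scaled = (np.log10(frb) - cmi) / (cma - cmi)   # normalized to the bounds
    assert scaled.min() >= 0.0 and scaled.max() <= 1.0
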
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):


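The isocontour helper above accepts the contour value as a fraction of the field's extrema in log space and maps it back to data units. The mapping (extrema values are made up):

    import numpy as np

    mi, ma = np.log10(1.0e-28), np.log10(1.0e-22)   # log extrema of Density, say
    rel_value = 0.5
    value = 10.0 ** (rel_value * (ma - mi) + mi)
    assert np.isclose(value, 1.0e-25)               # halfway in log space
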
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrice
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     


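The camera-path hunk above resamples each component of the keyframe centers and up vectors onto N evenly spaced times via `create_spline`. With `np.interp` standing in for the spline (a linear sketch, not yt's actual smoothing):

    import numpy as np

    times = np.linspace(0.0, 1.0, 4)                 # keyframe times
    centers = np.array([[0., 0., 0.], [1., 0., 0.],
                        [1., 1., 0.], [1., 1., 1.]])
    N = 16
    t_new = np.linspace(0.0, 1.0, N)
    pos = np.empty((N, 3), dtype="float64")
    for i in range(3):
        pos[:, i] = np.interp(t_new, times, centers[:, i])  # per-component
    assert pos.shape == (N, 3) and np.allclose(pos[0], centers[0])
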
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \


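yt/mods.py is the one spot where both aliases deliberately coexist: `na` stays importable -- "for historical reasons", per the comment -- while the codebase standardizes on `np`. The pattern costs nothing at runtime:

    # Both names bind the same module object; nothing is imported twice.
    import numpy as na   # legacy alias kept for existing user scripts
    import numpy as np   # the conventional alias going forward
    assert na is np
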
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is omitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total volume of the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,12 +994,12 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
         pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+                np.prod(self.domain_right_edge-self.domain_left_edge))
 
         while current_node is not None:
             pbar.update(volume_partitioned)
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])


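The node-id helpers near the top of amr_kdtree.py store the kd-tree
implicitly, heap-style: node 0 is the root and children are reached by
bit arithmetic instead of pointers.  A small self-contained check of that
arithmetic (_lchild_id is inferred by symmetry; only _rchild_id and
_parent_id appear in this hunk):

    def _lchild_id(id): return (id << 1) + 1  # assumed mirror of _rchild_id
    def _rchild_id(id): return (id << 1) + 2
    def _parent_id(id): return (id - 1) >> 1

    for node_id in range(7):
        # Both children round-trip back to their parent.
        assert _parent_id(_lchild_id(node_id)) == node_id
        assert _parent_id(_rchild_id(node_id)) == node_id
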
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 


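The test above seeds the RNG and stretches unit-cube draws across the
domain, so the two endpoints are reproducible run to run.  The same
pattern in isolation (3D domain edges here are illustrative assumptions):

    import numpy as np

    np.random.seed(4333)  # fixed seed -> identical endpoints every run
    domain_left_edge = np.array([0.0, 0.0, 0.0])
    domain_right_edge = np.array([1.0, 1.0, 1.0])

    # Scale a draw from [0, 1)^3 into the domain.
    start_point = np.random.random(3) * \
        (domain_right_edge - domain_left_edge) + domain_left_edge
    end_point = np.random.random(3) * \
        (domain_right_edge - domain_left_edge) + domain_left_edge
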
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -55,10 +55,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +122,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +134,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True


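Both helpers above use a symmetric relative difference,
|a1 - a2| / (a1 + a2), with np.nanmax so NaN entries are ignored when
finding the worst deviation.  A condensed standalone version of the array
check (generic exceptions stand in for the module's ArrayDelta and
ShapeMismatch classes):

    import numpy as np

    def compare_array_delta(a1, a2, acceptable):
        # Shapes must match before an element-wise comparison makes sense.
        if a1.shape != a2.shape:
            raise ValueError("shape mismatch")
        delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
        if np.nanmax(delta) > acceptable:
            raise AssertionError("max delta %g above %g"
                                 % (np.nanmax(delta), acceptable))
        return True

    compare_array_delta(np.ones(4), 1.0001 * np.ones(4), 1e-3)  # passes
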
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1212,7 +1212,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1266,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1302,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     np.sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) * 
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsin(np.fabs(self.OmegaCurvatureNow) * 
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0.
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   


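For case 1 of ComputeTimeFromRedshift above (flat, matter-only), the
closed form reduces to t*H0 = (2/3)(1+z)**-1.5, and the module converts
Time*H0 to seconds by dividing out H0 in km/s/Mpc units.  A quick numeric
check of that branch (the H0 value is an illustrative assumption):

    import numpy as np

    kmPerMpc = 3.08567758e19     # km in one megaparsec
    HubbleConstantNow = 70.0     # km/s/Mpc, illustrative

    z = 2.0
    TimeHubble0 = 2.0 / 3.0 / np.power(1 + z, 1.5)
    time = TimeHubble0 / HubbleConstantNow * kmPerMpc   # seconds
    print "t(z=2) ~ %.2e s (~%.2f Gyr)" % (time, time / 3.156e16)
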
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] == 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] == 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here


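The VECTORS branch above reads one flat big-endian float32 stream and
splits the interleaved x/y/z components with strided slices before a
Fortran-order reshape.  The same trick on synthetic in-memory data
(shapes are illustrative):

    import numpy as np

    grid_dims = (2, 3, 4)
    grid_ncells = 2 * 3 * 4

    # Interleaved stream: x0, y0, z0, x1, y1, z1, ...
    data = np.arange(3 * grid_ncells, dtype='>f4')

    data_x = data[0::3].reshape(grid_dims, order='F')
    data_y = data[1::3].reshape(grid_dims, order='F')
    data_z = data[2::3].reshape(grid_dims, order='F')
    assert data_y[0, 0, 0] == 1.0 and data_z[0, 0, 0] == 2.0
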
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,23 +35,23 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
@@ -61,28 +61,28 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
@@ -94,9 +94,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,23 +104,23 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
@@ -135,11 +135,11 @@
         xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
         ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
         zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
+        if np.any(np.isnan(self.table)):
             raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
+        if np.any(np.isnan(x) | np.isnan(y) | np.isnan(z)):
             raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
+        if np.any(np.isnan(xm) | np.isnan(ym) | np.isnan(zm)):
             raise ValueError
         my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
         my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)


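All three interpolators share the same lookup: np.digitize assigns each
value a bin, an index of -1 or len(bins)-1 marks an out-of-bounds value,
and truncate=True clamps instead of raising.  The 1D logic in isolation:

    import numpy as np

    x_bins = np.linspace(0.0, 1.0, 11).astype('float64')
    x_vals = np.array([-0.2, 0.35, 0.99, 1.7])

    x_i = (np.digitize(x_vals, x_bins) - 1).astype('int32')
    if np.any((x_i == -1) | (x_i == len(x_bins) - 1)):
        # truncate=True path: clamp into the valid bin range.
        x_i = np.minimum(np.maximum(x_i, 0), len(x_bins) - 2)
    print x_i   # [0 3 9 9]: -0.2 and 1.7 were clamped
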
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: raise RuntimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] !=3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = P.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unf., I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356,  0.        ,  0.        ])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unf., I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,20 +656,20 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
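
The matrix assembled at the end of this file is Rodrigues' rotation formula,
R = cos(t) I + sin(t) [u]_x + (1 - cos(t)) u u^T, for a rotation by theta about
the unit vector rot_vector; with u = (0, 1, 0) and theta = pi/4 it reproduces
the matrix shown in the docstring above. A quick standalone check of the
identity:

    import numpy as np

    def rodrigues(theta, u):
        u = np.asarray(u, dtype="float64")
        u /= np.sqrt(np.dot(u, u))
        # [u]_x, the skew-symmetric cross-product matrix of u
        K = np.array([[    0, -u[2],  u[1]],
                      [ u[2],     0, -u[0]],
                      [-u[1],  u[0],     0]])
        return np.cos(theta) * np.eye(3) + np.sin(theta) * K \
               + (1 - np.cos(theta)) * np.outer(u, u)

    R = rodrigues(np.pi / 4, [0, 1, 0])
    assert np.allclose(np.dot(R, R.T), np.eye(3))  # orthogonal
    assert np.allclose(np.linalg.det(R), 1.0)      # proper rotation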
     


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
@@ -97,10 +97,10 @@
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +129,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
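
The loop over `metadata` above is making the payload JSON-safe before upload:
ndarrays become nested lists and zero-dimensional numpy scalars become native
Python scalars. The same pattern in isolation (hypothetical helper name):

    import json
    import numpy as np

    def jsonify(metadata):
        for k in metadata:
            if isinstance(metadata[k], np.ndarray):
                metadata[k] = metadata[k].tolist()
            elif hasattr(metadata[k], 'dtype'):
                metadata[k] = np.asscalar(metadata[k])  # .item() in newer numpy
        return metadata

    s = json.dumps(jsonify({'n': np.int64(3), 'v': np.arange(3)}), sort_keys=True)
    # s -> '{"n": 3, "v": [0, 1, 2]}'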


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,7 +52,7 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
@@ -60,23 +60,23 @@
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
         self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -73,7 +73,7 @@
             for g in self.grids:
                 for f in fields:
                     if f not in self.queue[g.id]:
-                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
                         self.queue[g.id][f] = d
                 for f in pfields:
                     self.queue[g.id][f] = self._read(g, f)
@@ -87,12 +87,12 @@
         fi = self.pf.field_info[f]
         if fi.particle_type and g.NumberOfParticles == 0:
             # because this gets upcast to float
-            return na.array([],dtype='float64')
+            return np.array([],dtype='float64')
         try:
             temp = self.pf.h.io._read_data_set(g, f)
         except:# self.pf.hierarchy.io._read_exception as exc:
             if fi.not_in_all:
-                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
             else:
                 raise
         return temp
@@ -137,9 +137,9 @@
         msg = dict(grid_id = grid.id, field = field, op="read")
         mylog.debug("Requesting %s for %s from %s", field, grid, dest)
         if self.pf.field_info[field].particle_type:
-            data = na.empty(grid.NumberOfParticles, 'float64')
+            data = np.empty(grid.NumberOfParticles, 'float64')
         else:
-            data = na.empty(grid.ActiveDimensions, 'float64')
+            data = np.empty(grid.ActiveDimensions, 'float64')
         hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
         self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
         mylog.debug("Waiting for data.")


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -421,14 +421,14 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-    obj_ids = na.arange(len(objects))
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
@@ -525,14 +525,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
             datatype == "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
             datatype == "array"
         elif isinstance(data, types.ListType):
             datatype == "list"
@@ -549,14 +549,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -581,16 +581,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -608,7 +608,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -616,7 +616,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -636,7 +636,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -743,7 +743,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -751,11 +751,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -775,8 +775,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -802,9 +802,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -816,7 +816,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -830,7 +830,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -841,11 +841,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -867,7 +867,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -926,13 +926,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -943,7 +943,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -973,13 +973,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1000,13 +1000,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
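
Both concatenation branches above compute per-rank write offsets the same
way: Allgather the local sizes, prepend a zero, and take a cumulative sum. As
the in-code comment notes, `[0] + sizes` would broadcast an addition rather
than prepend, hence the nested concatenate. In isolation:

    import numpy as np

    sizes = np.array([4, 2, 5, 3], dtype='int64')  # hypothetical per-rank sizes
    offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
    # offsets -> array([ 0,  4,  6, 11]); total length is sizes.sum() == 14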


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
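
Both task-queue entry points partition the ranks identically: rank 0 is held
back as the queue server and the remaining ranks are split as evenly as
possible among the njobs work groups. For example:

    import numpy as np

    my_size, njobs = 9, 4  # hypothetical communicator size and job count
    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
    all_new_comms.insert(0, np.array([0]))
    # -> [array([0]), array([1, 2]), array([3, 4]),
    #     array([5, 6]), array([7, 8])]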


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]
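
The `cdict` structures in this file follow matplotlib's segment-data
convention: each channel is a sequence of (x, y0, y1) knots, and the
`zip(_vs, v, v)` idiom (y0 == y1 at every knot) yields a continuous map.
A minimal example with made-up channel ramps:

    import numpy as np
    import matplotlib.colors as cc

    _vs = np.linspace(0, 1, 255)
    r, g, b = _vs, _vs**2, np.sqrt(_vs)  # hypothetical ramps
    cdict = {'red':   list(zip(_vs, r, r)),
             'green': list(zip(_vs, g, g)),
             'blue':  list(zip(_vs, b, b))}
    cmap = cc.LinearSegmentedColormap('example', cdict, 256)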


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata == None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata == None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)
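
The colorbar above is drawn by sampling the colormap at 256 points, from 1
down to 0 so the bar runs the right way up, and handing the raw RGB bytes to
pyx as a bitmap. The sampling step on its own:

    import numpy as np
    from matplotlib import cm

    x = np.linspace(1, 0, 256)
    # bytes=True returns uint8 RGBA; keep RGB and flatten to a byte string
    cm_string = cm.get_cmap("jet")(x, bytes=True)[:, 0:3].tostring()
    # .tostring() matches the numpy of this era; .tobytes() in newer numpy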


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,7 +29,7 @@
     y_dict, \
     axis_names
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -352,7 +352,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -374,7 +374,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
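
The `argsort(...)[::-1]` above orders cells from coarsest to finest before
pixelizing, so finer cells are deposited last and overwrite coarser ones in
the buffer. For instance:

    import numpy as np

    dx = np.array([0.25, 0.5, 0.125, 0.5])  # hypothetical cell widths
    indices = np.argsort(dx)[::-1]
    # indices -> array([3, 1, 0, 2]): the 0.5 cells first, 0.125 last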


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer
@@ -163,7 +163,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -283,7 +283,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -297,13 +297,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()
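
The normalization in TransportAppender has to cope with zero-valued pixels,
whose log10 is -inf; masking with ~np.isinf before nanmin/nanmax keeps the
color bounds finite. The core of it:

    import numpy as np

    val = np.array([[0.0, 1.0], [10.0, 100.0]])  # hypothetical image
    image = np.log10(val)                        # the 0.0 pixel becomes -inf
    mi = np.nanmin(image[~np.isinf(image)])      # 0.0
    ma = np.nanmax(image[~np.isinf(image)])      # 2.0
    image = (image - mi) / (ma - mi)             # finite pixels now in [0, 1]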


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, min=0, max=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,22 +97,22 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
@@ -141,16 +141,16 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
         for channel in range(bitmap_array.shape[2]):
             bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
@@ -229,14 +229,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +279,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +295,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",
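
map_to_colors applies the lookup table channel by channel: np.interp maps
every normalized pixel through one channel's curve, and np.dstack stacks the
channels back into an RGB image. Reduced to its essentials:

    import numpy as np

    lut = [np.linspace(0, 1, 16)] * 3   # hypothetical grayscale LUT
    x = np.mgrid[0.0:1.0:16j]           # sample points of the LUT
    buff = np.random.random((8, 8))     # image normalized to [0, 1]
    mapped = np.dstack(
            [(np.interp(buff, x, v) * 255) for v in lut]).astype("uint8")
    # mapped.shape -> (8, 8, 3)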


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1884,7 +1884,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)
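
The plot-center handling above accepts either a named location or an explicit
coordinate; anything that is not a recognized string is coerced to a float64
array. Sketched as a hypothetical helper:

    import numpy as np

    def resolve_center(center, left_edge, right_edge):
        if center in ("center", "c"):
            return (right_edge + left_edge) / 2.0
        return np.array(center, dtype='float64')

    resolve_center("c", np.zeros(3), np.ones(3))  # -> array([ 0.5,  0.5,  0.5])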


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +52,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -195,10 +195,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -250,12 +250,12 @@
         #appropriate shift to the copied field.
 
         #set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +269,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -324,9 +324,9 @@
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
@@ -339,7 +339,7 @@
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
             if visible.nonzero()[0].size == 0: continue
-            verts = na.array(
+            verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
@@ -352,8 +352,8 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                active_ids = na.unique(plot.data['GridIndices'])
-                for i in na.where(visible_ids)[0]:
+                active_ids = np.unique(plot.data['GridIndices'])
+                for i in np.where(visible_ids)[0]:
                     plot._axes.text(
                         left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
                         left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
@@ -418,18 +418,18 @@
                              plot.data[self.field_y],
                              int(nx), int(ny),
                            (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
                       self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
         lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        mag = np.sqrt(pixX**2 + pixY**2)
+        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
         dt = 1.0 / (self.nsample-1)
         for i in range(1,self.nsample):
             xt = lines[i-1,0,:,:]
             yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
+            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
         for i in range(self.data_size[0]):
@@ -517,18 +517,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -635,7 +635,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -650,8 +650,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
+        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -687,7 +687,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -701,7 +701,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -845,7 +845,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1093,8 +1093,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)
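
Several callbacks in this file use `np.mgrid[-1:1:3j]` as a compact way to
enumerate the periodic shifts (-1, 0, 1), testing every point against the
plot window in the primary domain and in both periodic images. The
accumulation pattern looks like:

    import numpy as np

    px = np.array([0.05, 0.5, 0.95])   # hypothetical positions, period 1.0
    dom_x, (x0, x1) = 1.0, (0.4, 1.1)  # domain width and window bounds
    AllX = np.zeros(px.size, dtype='bool')
    for shift in np.mgrid[-1:1:3j]:
        AllX |= (px + shift * dom_x >= x0) & (px + shift * dom_x <= x1)
    # AllX -> array([ True,  True,  True]); 0.05 only enters via its +1 image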


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)


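The set_zlim hunks above clamp the colorbar range to at most `dex` decades around whichever end was pinned to the data. A standalone sketch of that arithmetic, using plain NumPy and illustrative names rather than the yt API:

    import numpy as np

    def clamp_zlim(buff, zmin='min', zmax='max', dex=None):
        # Ignore exact zeros, as the hunk above does via np.nonzero.
        nz = buff[np.nonzero(buff)]
        lo, hi = np.nanmin(nz), np.nanmax(nz)
        if zmin == 'min':
            zmin = lo
            if dex is not None:
                # Cap the range at dex decades above the minimum.
                zmax = min(zmin * 10**dex, hi)
        if zmax == 'max':
            zmax = hi
            if dex is not None:
                # Cap the range at dex decades below the maximum.
                zmin = max(zmax / 10**dex, lo)
        return zmin, zmax
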
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -31,7 +31,7 @@
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
@@ -120,7 +120,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -162,7 +162,7 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -172,11 +172,11 @@
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
     # Transforming to the cutting plane coordinate system
-    center = na.array(center)
+    center = np.array(center)
     center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
     (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
+    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+    center = np.dot(mat,center)
     width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
@@ -1101,7 +1101,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1145,12 +1145,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1169,8 +1169,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1194,7 +1194,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)


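GetOffAxisBoundsAndCenter above rotates the re-centered point into the cutting-plane frame built by ortho_find. A sketch of the same transform with a stand-in Gram-Schmidt frame builder (ortho_find itself is yt's; this is only an assumed equivalent with the same contract):

    import numpy as np

    def ortho_frame(normal):
        # Normalize, then pick a helper axis not parallel to the normal.
        n = np.asarray(normal, dtype='float64')
        n /= np.sqrt((n**2).sum())
        t = np.array([1., 0., 0.]) if abs(n[0]) < 0.9 else np.array([0., 1., 0.])
        p1 = np.cross(n, t)
        p1 /= np.sqrt((p1**2).sum())
        p2 = np.cross(n, p1)
        return n, p1, p2

    normal, perp1, perp2 = ortho_frame([0.2, 0.3, 0.4])
    # Rows of mat are the frame vectors, so this projects onto them.
    mat = np.transpose(np.column_stack((perp1, perp2, normal)))
    center = np.dot(mat, np.array([0.1, -0.2, 0.05]))  # point in plane coords
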
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.linspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.linspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -338,11 +338,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -369,7 +369,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -378,7 +378,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)


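The profile_plotter hunks build bin edges per axis scale: log axes get edges uniform in log10, linear axes get uniform edges. A minimal standalone sketch of that rule (NumPy only; names illustrative):

    import numpy as np

    def bin_edges(lo, hi, nbins, scale):
        if scale == 'log':
            # Edges evenly spaced in log10.
            return np.logspace(np.log10(lo), np.log10(hi), nbins + 1)
        # Edges evenly spaced in the value itself.
        return np.linspace(lo, hi, nbins + 1)

    print(bin_edges(1e-3, 1e3, 6, 'log'))     # decade-spaced edges
    print(bin_edges(0.0, 1.0, 4, 'linear'))   # evenly spaced edges
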
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_passthrough
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp


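clean_streamlines above relies on the streamline buffers being preallocated with np.zeros, so unfilled steps are all-zero rows. A small sketch of that masking, under the same assumption the hunk makes that a real position never has an exactly-zero component:

    import numpy as np

    streams = np.zeros((2, 5, 3))
    streams[0, :3] = np.random.random((3, 3))  # first stream used 3 steps
    streams[1, :4] = np.random.random((4, 3))  # second used 4

    cleaned = []
    for stream in streams:
        # Rows with any exactly-zero component are treated as padding.
        mask = np.all(stream != 0.0, axis=1)
        cleaned.append(stream[mask])
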
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs == 1) and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs


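The LogLocator hunk places ticks at integer powers of the base, thinned by a stride so no more than roughly numticks survive. A standalone sketch of that arithmetic (subs handling omitted):

    import math
    import numpy as np

    def log_ticks(vmin, vmax, base=10.0, numticks=15):
        lo, hi = math.log(vmin, base), math.log(vmax, base)
        numdec = math.floor(hi) - math.ceil(lo)
        # Widen the stride until the tick count fits.
        stride = 1
        while numdec / stride + 1 > numticks:
            stride += 1
        decades = np.arange(math.floor(lo), math.ceil(hi) + stride, stride)
        return base ** decades

    print(log_ticks(3e-4, 2e5))  # 1e-4 ... 1e6 at unit decades
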
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')


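CUDARayCast.py above builds its ray origins with np.mgrid, where a complex step count requests that many evenly spaced samples per axis, then maps each pixel offset through the cutting plane's inverse matrix. A toy-sized sketch (identity matrix as a stand-in for cp._inv_mat):

    import numpy as np

    Nvec, W = 4, 0.5
    px, py = np.mgrid[-W:W:Nvec*1j, -W:W:Nvec*1j]  # (Nvec, Nvec) pixel offsets
    inv_mat = np.eye(3)                            # stand-in for cp._inv_mat
    center = np.array([0.5, 0.5, 0.5])
    xv = inv_mat[0, 0]*px + inv_mat[0, 1]*py + center[0]
    yv = inv_mat[1, 0]*px + inv_mat[1, 1]*py + center[1]
    zv = inv_mat[2, 0]*px + inv_mat[2, 1]*py + center[2]
    v_pos = np.array([xv, yv, zv], dtype='float32').transpose()
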
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]


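The loop at the end of UBVRI.py takes each filter's characteristic wavelength at its transmission peak; in miniature:

    import numpy as np

    wavelen = np.array([4000., 4400., 4800.])
    trans = np.array([0.3, 1.0, 0.5])
    Lchar = wavelen[np.argmax(trans)]  # 4400.0, the peak-transmission bin
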
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -167,12 +167,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -226,10 +226,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -289,23 +289,23 @@
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
     def get_sampler(self, args):
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
@@ -326,13 +326,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -510,30 +510,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +559,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +568,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +583,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
         north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +613,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -676,12 +676,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,14 +693,14 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -708,7 +708,7 @@
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -726,7 +726,7 @@
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
@@ -747,20 +747,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +771,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -823,14 +823,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +852,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +880,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +896,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -935,10 +935,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -957,7 +957,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -968,13 +968,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -988,13 +988,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1088,7 +1088,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1106,7 +1106,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1164,18 +1164,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1195,17 +1195,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1228,9 +1228,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1243,7 +1243,7 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
@@ -1269,7 +1269,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1312,7 +1312,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1322,9 +1322,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1349,7 +1349,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1381,10 +1381,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1426,7 +1426,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1445,7 +1445,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1457,8 +1457,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1466,14 +1466,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1502,15 +1502,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1568,12 +1568,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1607,8 +1607,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1630,7 +1630,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1722,7 +1722,7 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,


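The camera rotate()/roll() hunks apply get_rotation_matrix (yt's) and switch the view to the rotated vector. A Rodrigues construction with the same contract, offered only as an assumed equivalent:

    import numpy as np

    def rotation_matrix(theta, u):
        # Rotate by theta about the unit axis u (Rodrigues' formula).
        u = np.asarray(u, dtype='float64')
        u /= np.sqrt((u**2).sum())
        ux = np.array([[0., -u[2], u[1]],
                       [u[2], 0., -u[0]],
                       [-u[1], u[0], 0.]])
        return (np.cos(theta)*np.eye(3) + np.sin(theta)*ux
                + (1 - np.cos(theta))*np.outer(u, u))

    R = rotation_matrix(np.pi/4, [0, 0, 1])
    normal_vector = np.dot(R, np.array([1.0, 0.0, 0.0]))
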
diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random.((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z != None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times == None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time == None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 


diff -r b45aa6c3c1422e3c591df3e63cce19f374a2b146 -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d87439267be6/
changeset:   d87439267be6
branch:      yt
user:        MatthewTurk
date:        2012-09-10 19:59:42
summary:     Updating coding styleguide.
affected #:  1 file

diff -r e6986d875c433ca3ab09e7070c1a06c7e7c7eef1 -r d87439267be6e6c2d2f2fb4c6a077b6a075c6900 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.
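
The memory-copy guidance above is concrete enough to demonstrate.  A minimal
sketch of the two in-place idioms the styleguide recommends (illustrative
values only):

    import numpy as np

    a = np.arange(12, dtype="float64")
    a.shape = (3, 4)       # rebinds the shape in place; no new array is allocated
    np.multiply(a, 3, a)   # writes the product back into a, avoiding a temporary copy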



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3dfdedf7fb12/
changeset:   3dfdedf7fb12
branch:      yt
user:        ngoldbaum
date:        2012-09-09 03:58:00
summary:     Improving the way unit names and display names of derived fields are handled.  Now, when PlotWindow encounters a field name or a display name it cannot parse, an exception is thrown and a useful error message is printed.  Complicated field names involving arbitrary LaTeX and text are now possible.
affected #:  2 files
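
The validation flow this changeset introduces can be sketched in isolation.
A minimal version, assuming the matplotlib 1.x-era home of ParseFatalException
and that the new exception class is importable once the diff below is applied:

    from matplotlib.mathtext import MathTextParser
    from matplotlib.pyparsing import ParseFatalException
    from yt.utilities.exceptions import YTCannotParseFieldDisplayName

    def validate_display_name(field, label):
        # Parse the mathtext up front so a bad label raises a readable yt
        # exception instead of a bare parser traceback at draw time.
        parser = MathTextParser('Agg')
        try:
            parser.parse(label)
        except ParseFatalException, err:
            raise YTCannotParseFieldDisplayName(field, label, str(err))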

diff -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 -r 3dfdedf7fb1201b2c29d861b8d8d283cc36415ac yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -76,6 +76,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = display_unit
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)


diff -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 -r 3dfdedf7fb1201b2c29d861b8d8d283cc36415ac yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,6 +26,8 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+from matplotlib.pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
@@ -738,11 +740,33 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
-            if field_name is None: field_name = f
+
+            if field_name is None:
+                field_name = r'$\rm{'+f+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
+
+            try:
+                parser.parse(r'$'+md['units']+r'$')
+            except ParseFatalException, err:
+                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+
             if md['units'] == None or md['units'] == '':
+<<<<<<< variant A
                 label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
+>>>>>>> variant B
+                label = field_name
+####### Ancestor
+                label += r'$'
+======= end
             else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9926251b1008/
changeset:   9926251b1008
branch:      yt
user:        ngoldbaum
date:        2012-09-09 20:07:57
summary:     Fixing a bad merge.
affected #:  1 file

diff -r 3dfdedf7fb1201b2c29d861b8d8d283cc36415ac -r 9926251b1008f8f68f3af39395cccce635f4b56e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -758,13 +758,7 @@
                 raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
 
             if md['units'] == None or md['units'] == '':
-<<<<<<< variant A
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
->>>>>>> variant B
                 label = field_name
-####### Ancestor
-                label += r'$'
-======= end
             else:
                 label = field_name+r'$\/\/('+md['units']+r')$'
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6732a585f969/
changeset:   6732a585f969
branch:      yt
user:        ngoldbaum
date:        2012-09-08 01:49:29
summary:     Updating plot window to accept field display names that are
already properly formatted mathtext.  Right now this is detected by
checking whether the display name contains a '$' (i.e. the string
includes a LaTeX math expression).  If not, the display name is
assumed to be improperly formatted LaTeX and is prepended with
\rm{ and appended with }.  This means that in the future all display
names that include mathematical expressions must be properly formatted
mathtext.
affected #:  3 files
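
As a worked example of the rule just described, here is the wrapping logic in
a standalone form (hypothetical helper name; the logic follows the diff below):

    def wrap_display_name(field, display_name):
        # Names containing no '$' are treated as plain text and wrapped in
        # \rm{...}; names that already contain mathtext pass through untouched.
        if display_name is None:
            return r'$\rm{' + field + r'}$'
        elif display_name.find('$') == -1:
            return r'$\rm{' + display_name + r'}$'
        return display_name

    wrap_display_name("Density", None)    # -> $\rm{Density}$
    wrap_display_name("Bmag", r"$|B|$")   # -> $|B|$ (unchanged)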

diff -r 9926251b1008f8f68f3af39395cccce635f4b56e -r 6732a585f9699a571263a230d2fa26e48ab6a40f yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -968,7 +968,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


diff -r 9926251b1008f8f68f3af39395cccce635f4b56e -r 6732a585f9699a571263a230d2fa26e48ab6a40f yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -487,7 +487,7 @@
     """
     return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 


diff -r 9926251b1008f8f68f3af39395cccce635f4b56e -r 6732a585f9699a571263a230d2fa26e48ab6a40f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -725,12 +725,12 @@
                 self.plots[f].image, cax = self.plots[f].cax)
 
             if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+                axes_unit_label = '\/\/('+md['unit']+')'
             else:
                 axes_unit_label = ''
 
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',



https://bitbucket.org/yt_analysis/yt-3.0/changeset/86ad53e70660/
changeset:   86ad53e70660
branch:      yt
user:        MatthewTurk
date:        2012-09-10 20:15:49
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #270)
affected #:  4 files

diff -r d87439267be6e6c2d2f2fb4c6a077b6a075c6900 -r 86ad53e706609810e95b989191c85e1365768859 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -968,7 +968,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


diff -r d87439267be6e6c2d2f2fb4c6a077b6a075c6900 -r 86ad53e706609810e95b989191c85e1365768859 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -487,7 +487,7 @@
     """
     return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 


diff -r d87439267be6e6c2d2f2fb4c6a077b6a075c6900 -r 86ad53e706609810e95b989191c85e1365768859 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -76,6 +76,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = display_unit
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)


diff -r d87439267be6e6c2d2f2fb4c6a077b6a075c6900 -r 86ad53e706609810e95b989191c85e1365768859 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,6 +26,8 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+from matplotlib.pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
@@ -723,12 +725,12 @@
                 self.plots[f].image, cax = self.plots[f].cax)
 
             if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+                axes_unit_label = '\/\/('+md['unit']+')'
             else:
                 axes_unit_label = ''
 
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
@@ -738,11 +740,27 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
-            if field_name is None: field_name = f
+
+            if field_name is None:
+                field_name = r'$\rm{'+f+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
+
+            try:
+                parser.parse(r'$'+md['units']+r'$')
+            except ParseFatalException, err:
+                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
+                label = field_name
             else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b8446e7a4fcc/
changeset:   b8446e7a4fcc
branch:      yt
user:        samskillman
date:        2012-09-11 23:46:26
summary:     Fixing a broken na->np conversion: np was used as an input variable name, which shadowed the renamed numpy import.
affected #:  1 file
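
The failure mode is easy to reproduce in isolation (hypothetical functions,
not the yt code itself):

    import numpy as np

    def fill_bad(si, np):        # the parameter 'np' shadows the numpy module,
        return np.array(si)      # so this raises AttributeError at call time

    def fill_good(si, npart):    # renaming the parameter restores access to numpy
        return np.array(si)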

diff -r 86ad53e706609810e95b989191c85e1365768859 -r b8446e7a4fcccae084f13b29e7be2c5b7060133c yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -647,26 +647,26 @@
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:1] = ei
         self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,1:] = 0.0
         self.grid_right_edge[:,1:] = 1.0
         self.grid_dimensions[:,1:] = 1
 
 class EnzoHierarchy2D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:2] = ei
         self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,2] = 0.0
         self.grid_right_edge[:,2] = 1.0
         self.grid_dimensions[:,2] = 1



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0856364b1b31/
changeset:   0856364b1b31
branch:      yt
user:        MatthewTurk
date:        2012-09-12 00:35:47
summary:     Adding conversion factors for lingering translated fields in FLASH
affected #:  1 file

diff -r b8446e7a4fcccae084f13b29e7be2c5b7060133c -r 0856364b1b31fd275c861019f3912e79cc7cb51f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -42,7 +42,7 @@
 from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -184,11 +184,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2a9a5b16f808/
changeset:   2a9a5b16f808
branch:      yt
user:        MatthewTurk
date:        2012-09-12 00:46:01
summary:     Don't instantiate a given pf twice in FLASH.
affected #:  1 file
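
A minimal sketch of the guard being added, assuming the parameter file cache
hands back an existing instance whose __init__ would otherwise run a second
time (hypothetical class name):

    import h5py

    class FLASHStaticOutput(object):
        _handle = None    # class-level default marks "not yet initialized"

        def __init__(self, filename):
            if self._handle is not None:
                return    # already initialized; skip reopening the file
            self._handle = h5py.File(filename, "r")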

diff -r 0856364b1b31fd275c861019f3912e79cc7cb51f -r 2a9a5b16f8082413eceafaebfc085d693845db90 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -208,6 +208,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override



https://bitbucket.org/yt_analysis/yt-3.0/changeset/4e6e794f4053/
changeset:   4e6e794f4053
branch:      yt
user:        MatthewTurk
date:        2012-09-13 14:30:49
summary:     Fixing YT_DEST in activate.csh.
affected #:  1 file

diff -r 2a9a5b16f8082413eceafaebfc085d693845db90 -r 4e6e794f4053f4db788d6d1263163a509439213b doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bb29b353e351/
changeset:   bb29b353e351
branch:      yt
user:        samskillman
date:        2012-09-14 00:41:02
summary:     Fixing a bug introduced when this was moved out of camera.py and into orientation.py.  The original north vector fed to the orienter should not be modified, even if steady_north is used.
affected #:  1 file
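
A general illustration of the in-place pitfall the summary warns about
(illustrative vectors, not the orientation.py code itself):

    import numpy as np

    north = np.array([0.0, 3.0, 4.0])
    alias = north                              # same buffer, not a copy
    alias /= np.sqrt(np.dot(alias, alias))     # in-place divide rewrites north too

    north2 = np.array([0.0, 3.0, 4.0])
    unit = north2 / np.sqrt(np.dot(north2, north2))  # out-of-place; north2 intact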

diff -r 4e6e794f4053f4db788d6d1263163a509439213b -r bb29b353e351c0cadb58459b4348dbfc12145771 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -56,6 +56,7 @@
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
@@ -74,7 +75,6 @@
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))
         east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
         self.inv_mat = np.linalg.pinv(self.unit_vectors)
         



https://bitbucket.org/yt_analysis/yt-3.0/changeset/a95c5836ddd0/
changeset:   a95c5836ddd0
branch:      yt
user:        ngoldbaum
date:        2012-09-14 20:35:20
summary:     Switching the default unit for command line plotting to '1' instead of 'unitary'.
affected #:  1 file

diff -r 86ad53e706609810e95b989191c85e1365768859 -r a95c5836ddd0a331a2c31c06ff90b2bff572efe0 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/af06f14fcd9e/
changeset:   af06f14fcd9e
branch:      yt
user:        MatthewTurk
date:        2012-09-14 20:42:30
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #272)
affected #:  1 file

diff -r bb29b353e351c0cadb58459b4348dbfc12145771 -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7152e1776e98/
changeset:   7152e1776e98
branch:      yt
user:        ngoldbaum
date:        2012-09-17 22:03:57
summary:     Fixing the way 2D data is handled so that grid edges (for FLASH) and dx/dy/dz are correct for non-Enzo data.
affected #:  3 files

diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r 7152e1776e984b39e4b96aec129e654e9f4ba247 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,8 +210,6 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property


diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r 7152e1776e984b39e4b96aec129e654e9f4ba247 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -143,6 +143,9 @@
         for i in range(nlevels+1):
             dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
             self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx


diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r 7152e1776e984b39e4b96aec129e654e9f4ba247 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1255,8 +1255,7 @@
     def __init__(self, field, size):
         self._plot_valid = True
         fsize, axrect, caxrect = self._get_best_layout(size)
-        # Hardcoding the axis dimensions for now
-        
+                
         self.figure = matplotlib.figure.Figure(figsize = fsize, 
                                                frameon = True)
         self.axes = self.figure.add_axes(axrect)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/27378d5fc23d/
changeset:   27378d5fc23d
branch:      yt
user:        ngoldbaum
date:        2012-09-17 22:33:58
summary:     A kludgy way of avoiding crashes when trying to make a plot with a very compressed axis ratio.  A warning is printed that this might be a bad idea.
affected #:  1 file

diff -r 7152e1776e984b39e4b96aec129e654e9f4ba247 -r 27378d5fc23db4ebee055507307b77d29e25165b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self,name=None,mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -817,6 +817,10 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
         if name == None:
@@ -841,7 +845,7 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n,mpl_kwargs))
         return names
 
     def _send_zmq(self):
@@ -1255,13 +1259,23 @@
     def __init__(self, field, size):
         self._plot_valid = True
         fsize, axrect, caxrect = self._get_best_layout(size)
-                
-        self.figure = matplotlib.figure.Figure(figsize = fsize, 
-                                               frameon = True)
-        self.axes = self.figure.add_axes(axrect)
-        self.cax = self.figure.add_axes(caxrect)
-
-    def save(self, name, canvas = None):
+        
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good, '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs, canvas = None):
         if name[-4:] == '.png':
             suffix = ''
         else:
@@ -1278,7 +1292,7 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn)
+        canvas.print_figure(fn,mpl_kwargs)
         return fn
 
     def _get_best_layout(self, size):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/165fc415608a/
changeset:   165fc415608a
branch:      yt
user:        samskillman
date:        2012-09-17 22:53:42
summary:     Should be **mpl_kwargs so the dict is unpacked into keyword arguments for matplotlib.
affected #:  1 file

diff -r 27378d5fc23db4ebee055507307b77d29e25165b -r 165fc415608a397479110e532109c5a6f773650e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1292,7 +1292,7 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn,mpl_kwargs)
+        canvas.print_figure(fn,**mpl_kwargs)
         return fn
 
     def _get_best_layout(self, size):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2a6872ca2489/
changeset:   2a6872ca2489
branch:      yt
user:        ngoldbaum
date:        2012-09-18 01:04:08
summary:     mpl_kwargs should be an empty dict rather than None by default.
affected #:  1 file

diff -r 165fc415608a397479110e532109c5a6f773650e -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None,mpl_kwargs=None):
+    def save(self,name=None,mpl_kwargs={}):
         """saves the plot to disk.
 
         Parameters



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9497ec69bfce/
changeset:   9497ec69bfce
branch:      yt
user:        MatthewTurk
date:        2012-09-18 03:15:14
summary:     Swap the mutable default in the function declaration for None and check inside the function.
affected #:  1 file
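
The pitfall and the sentinel idiom, side by side, in a minimal sketch
(hypothetical function names):

    def save_bad(name=None, mpl_kwargs={}):
        # The default dict is created once at definition time and shared by
        # every call, so mutations leak between unrelated calls.
        mpl_kwargs.setdefault('touched', True)
        return mpl_kwargs

    def save_good(name=None, mpl_kwargs=None):
        # None is the sentinel; a fresh dict is built inside each call.
        if mpl_kwargs is None:
            mpl_kwargs = {}
        return mpl_kwargs

    save_bad() is save_bad()      # True  -- shared state across calls
    save_good() is save_good()    # False -- independent dicts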

diff -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 -r 9497ec69bfce39c1289edee1a9c226389afe166c yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None,mpl_kwargs={}):
+    def save(self, name=None, mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -827,6 +827,7 @@
             name = str(self.pf)
         elif name.endswith('.png'):
             return v.save(name)
+        if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
         if 'Slice' in self.data_source.__class__.__name__:



https://bitbucket.org/yt_analysis/yt-3.0/changeset/eeb5923f215e/
changeset:   eeb5923f215e
branch:      yt
user:        MatthewTurk
date:        2012-09-18 22:39:29
summary:     Adding IPython Notebooks to the Hub upload types, and adding a
"yt upload_notebook" command.
affected #:  3 files

diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1188,6 +1188,35 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTNotebookUploadCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
+        """
+        Upload an IPython notebook to hub.yt-project.org.
+        """
+
+    name = "upload_notebook"
+    def __call__(self, args):
+        filename = args.file
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        if not filename.endswith(".ipynb"):
+            print "File must be an IPython notebook!"
+            return 1
+        import json
+        try:
+            t = json.loads(open(filename).read())['metadata']['name']
+        except (ValueError, KeyError):
+            print "File does not appear to be an IPython notebook."
+        from yt.utilities.minimal_representation import MinimalNotebook
+        mn = MinimalNotebook(filename, t)
+        rv = mn.upload()
+        print "Upload successful!"
+        print
+        print "To view your notebook go here:"
+        print "  %s" % (rv['url'].replace("/go/", "/nb/"))
+        print
+
 class YTPlotCmd(YTCommand):
     args = ("width", "unit", "bn", "proj", "center",
             "zlim", "axis", "field", "weight", "skip",


diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -141,3 +141,8 @@
 
     def __str__(self):
         return "This parameter file doesn't recognize %s" % self.unit
+
+class YTHubRegisterError(YTException):
+    def __str__(self):
+        return "You must create an API key before uploading.  See " + \
+               "https://data.yt-project.org/getting_started.html"


diff -r af06f14fcd9eab09d59a8f065a42c64731fb3f5d -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -30,6 +30,7 @@
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
 from .poster.encode import multipart_encode
@@ -93,6 +94,7 @@
     def upload(self):
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
+        if api_key == '': raise YTHubRegisterError
         metadata, (final_name, chunks) = self._generate_post()
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
@@ -216,3 +218,22 @@
         metadata = self._attrs
         chunks = []
         return (metadata, ("chunks", []))
+
+class MinimalNotebook(MinimalRepresentation):
+    type = "notebook"
+    _attr_list = ("title",)
+
+    def __init__(self, filename, title = None):
+        # First we read in the data
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        self.data = open(filename).read()
+        if title is None:
+            title = json.loads(self.data)['metadata']['name']
+        self.title = title
+        self.data = np.fromstring(self.data, dtype='c')
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = [ ("notebook", self.data) ]
+        return (metadata, ("chunks", chunks))
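
Together, the new command and the MinimalNotebook class give a one-line
upload path.  A hypothetical session, assuming hub_api_key is set in the
[yt] section of the configuration and the notebook exists (the URL shown
is a placeholder):

    $ yt upload_notebook my_analysis.ipynb
    Upload successful!

    To view your notebook go here:
      https://hub.yt-project.org/nb/<id>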



https://bitbucket.org/yt_analysis/yt-3.0/changeset/878a54e77a4f/
changeset:   878a54e77a4f
branch:      yt
user:        MatthewTurk
date:        2012-09-18 22:39:48
summary:     Merge
affected #:  3 files

diff -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a -r 878a54e77a4feef3d03a0417ede10fab4a7998d6 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,8 +210,6 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property


diff -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a -r 878a54e77a4feef3d03a0417ede10fab4a7998d6 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -143,6 +143,9 @@
         for i in range(nlevels+1):
             dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
             self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx


diff -r eeb5923f215ee949f30f4160e7aeecf9d96bb37a -r 878a54e77a4feef3d03a0417ede10fab4a7998d6 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self, name=None, mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -817,12 +817,17 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
         if name == None:
             name = str(self.pf)
         elif name.endswith('.png'):
             return v.save(name)
+        if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
         if 'Slice' in self.data_source.__class__.__name__:
@@ -841,7 +846,7 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n,mpl_kwargs))
         return names
 
     def _send_zmq(self):
@@ -1255,14 +1260,23 @@
     def __init__(self, field, size):
         self._plot_valid = True
         fsize, axrect, caxrect = self._get_best_layout(size)
-        # Hardcoding the axis dimensions for now
         
-        self.figure = matplotlib.figure.Figure(figsize = fsize, 
-                                               frameon = True)
-        self.axes = self.figure.add_axes(axrect)
-        self.cax = self.figure.add_axes(caxrect)
-
-    def save(self, name, canvas = None):
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good, '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs, canvas = None):
         if name[-4:] == '.png':
             suffix = ''
         else:
@@ -1279,7 +1293,7 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn)
+        canvas.print_figure(fn,**mpl_kwargs)
         return fn
 
     def _get_best_layout(self, size):
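
With this merge, mpl_kwargs flows from PlotWindow.save all the way down
to canvas.print_figure.  A hypothetical call, assuming an existing slice
plot slc:

    slc.save('my_slice', mpl_kwargs={'dpi': 300, 'bbox_inches': 'tight'})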



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d77fb0031d39/
changeset:   d77fb0031d39
branch:      yt
user:        MatthewTurk
date:        2012-09-18 22:49:19
summary:     Adding a bit about how to just view the notebook.
affected #:  1 file

diff -r 878a54e77a4feef3d03a0417ede10fab4a7998d6 -r d77fb0031d39753d51694f464b35a4622fe98441 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1213,7 +1213,12 @@
         rv = mn.upload()
         print "Upload successful!"
         print
+        print "To access your raw notebook go here:"
+        print
+        print "  %s" % (rv['url'])
+        print
         print "To view your notebook go here:"
+        print
         print "  %s" % (rv['url'].replace("/go/", "/nb/"))
         print
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2120043a7851/
changeset:   2120043a7851
branch:      yt
user:        MatthewTurk
date:        2012-09-19 15:04:53
summary:     Quick fix to get around roundoff error in the progressbar.

Sam, any suggestions would help!
affected #:  1 file

diff -r d77fb0031d39753d51694f464b35a4622fe98441 -r 2120043a7851f77c41c67f5301640528cc8b5314 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -998,11 +998,11 @@
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
-        pbar = get_pbar("Building kd-Tree",
-                np.prod(self.domain_right_edge-self.domain_left_edge))
+        total_vol = np.prod(self.domain_right_edge-self.domain_left_edge)
+        pbar = get_pbar("Building kd-Tree", total_vol)
 
         while current_node is not None:
-            pbar.update(volume_partitioned)
+            pbar.update(min(volume_partitioned, total_vol))
 
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.
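
The clamp works because an accumulated floating-point sum can overshoot
the analytic total by an ulp or two.  A toy illustration, independent of
yt:

    total = 0.3
    acc = 0.1 + 0.2            # 0.30000000000000004, which overshoots total
    value = min(acc, total)    # clamped, so the progressbar never exceeds 100%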



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9f67757a44ef/
changeset:   9f67757a44ef
branch:      yt
user:        xarthisius
date:        2012-09-20 11:24:55
summary:     Import ParseFatalException from the proper module
affected #:  1 file

diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 9f67757a44ef2b2cfae8d68f60419803dc6cee77 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,7 +27,7 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
-from matplotlib.pyparsing import ParseFatalException
+from pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__



https://bitbucket.org/yt_analysis/yt-3.0/changeset/b30f700ac72b/
changeset:   b30f700ac72b
branch:      yt
user:        xarthisius
date:        2012-09-20 17:01:42
summary:     Use a try/except statement for importing pyparsing, in case it's not bundled with matplotlib
affected #:  1 file

diff -r 9f67757a44ef2b2cfae8d68f60419803dc6cee77 -r b30f700ac72bcaa6d1e6145499d5e8a13c747e06 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,7 +27,10 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
-from pyparsing import ParseFatalException
+try:
+    from matplotlib.pyparsing import ParseFatalException
+except ImportError:
+    from pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
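
This is the usual guarded-import idiom for a module that may live in
more than one place.  The same pattern with a generic standard-library
fallback, purely for illustration:

    try:
        import simplejson as json   # prefer the external package if installed
    except ImportError:
        import json                 # otherwise fall back to the standard library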



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5087f6769726/
changeset:   5087f6769726
branch:      yt
user:        ngoldbaum
date:        2012-09-08 01:49:29
summary:     Updating plot window to accept field display names that are already
properly formatted mathtext.  Right now this is detected by checking
whether the first character of the display name is '$' (i.e. the
string begins a LaTeX math expression).  If not, the display name is
assumed to be improperly formatted LaTeX and is prepended with
\rm{ and appended with }.  This means that in the future all display
names that include mathematical expressions must be properly formatted
mathtext.
affected #:  3 files

diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -968,7 +968,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -487,7 +487,7 @@
     """
     return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 


diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -723,12 +723,12 @@
                 self.plots[f].image, cax = self.plots[f].cax)
 
             if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+                axes_unit_label = '\/\/('+md['unit']+')'
             else:
                 axes_unit_label = ''
 
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
@@ -738,11 +738,16 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
+            # If the field author has passed us formatted mathtext, leave it alone
+            if field_name[0] == '$':
+                label = field_name[:-1]
+            else:
+                label = r'$\rm{'+field_name+r'}'
             if field_name is None: field_name = f
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
+                label += r'$'
             else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label += r'\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
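
The detection rule reduces to a single branch on the leading character.
A standalone sketch of the labeling logic (not the actual PlotWindow
code):

    def make_label(display_name):
        # Names already wrapped in '$...$' are treated as finished mathtext.
        if display_name.startswith('$'):
            return display_name
        # Otherwise wrap the plain string in \rm{} for upright rendering.
        return r'$\rm{' + display_name + r'}$'

    make_label('Density')            # -> '$\rm{Density}$'
    make_label(r'$\rho_{\rm gas}$')  # returned unchanged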
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/cc17d1b6a2f9/
changeset:   cc17d1b6a2f9
branch:      yt
user:        ngoldbaum
date:        2012-09-09 03:58:00
summary:     Improving the way unit names and display names of derived fields are handled.  Now when PlotWindow encounters a field name or a display name it cannot parse, an exception is thrown and a useful error message is printed.  Complicated field names involving arbitrary LaTeX and text are now possible.
affected #:  2 files

diff -r 5087f6769726a9527b508470eb1b904bfe6beacd -r cc17d1b6a2f976688ba07ab333e013becb85f097 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -76,6 +76,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = unit_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)


diff -r 5087f6769726a9527b508470eb1b904bfe6beacd -r cc17d1b6a2f976688ba07ab333e013becb85f097 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,6 +26,8 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+from matplotlib.pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
@@ -738,16 +740,27 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
-            # If the field author has passed us formatted mathtext, leave it alone
-            if field_name[0] == '$':
-                label = field_name[:-1]
+
+            if field_name is None:
+                field_name = r'$\rm{'+f+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
+
+            try:
+                parser.parse(r'$'+md['units']+r'$')
+            except ParseFatalException, err:
+                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+
+            if md['units'] == None or md['units'] == '':
+                label = field_name
             else:
-                label = r'$\rm{'+field_name+r'}'
-            if field_name is None: field_name = f
-            if md['units'] == None or md['units'] == '':
-                label += r'$'
-            else:
-                label += r'\/\/('+md['units']+r')$'
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
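
The validation amounts to a dry-run parse before the label ever reaches
a plot.  A minimal standalone version of the check, assuming only
matplotlib (and, on newer versions, standalone pyparsing) is available:

    from matplotlib.mathtext import MathTextParser
    try:
        from matplotlib.pyparsing import ParseFatalException
    except ImportError:
        from pyparsing import ParseFatalException

    parser = MathTextParser('Agg')
    label = r'$\rm{Density}\/\/(\rm{g}/\rm{cm}^{3})$'
    try:
        parser.parse(label)                 # raises if the mathtext is malformed
    except ParseFatalException, err:
        raise ValueError("label %s failed to parse: %s" % (label, err))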
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/cc78ebee2f1f/
changeset:   cc78ebee2f1f
branch:      yt
user:        ngoldbaum
date:        2012-09-19 01:25:39
summary:     Fixing the way 2D data is loaded.  Need to be a bit more careful for 2D AMR simulations.
affected #:  2 files

diff -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 -r cc78ebee2f1f9ab46f73fa00ca7698fac397a4f0 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,6 +210,8 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property


diff -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 -r cc78ebee2f1f9ab46f73fa00ca7698fac397a4f0 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -148,8 +148,8 @@
 
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
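
The grid_patch change matters whenever the domain is not the unit box: a
collapsed axis now gets a cell width equal to the full domain extent
rather than a hardcoded 1.0.  A toy version of the computation
(illustrative values, not yt code):

    import numpy as np
    dimensionality = 2
    ActiveDimensions = np.array([64, 64, 1])    # one cell thick in z
    LE = np.array([0.25, 0.25, 0.0])            # per-grid edges; z extent is
    RE = np.array([0.50, 0.50, 0.0])            # degenerate for 2D data
    dds = (RE - LE) / ActiveDimensions          # dds[2] == 0.0 here
    domain_left_edge = np.array([0.0, 0.0, -2.0])
    domain_right_edge = np.array([1.0, 1.0, 2.0])
    if dimensionality < 3:
        # Use the full domain extent for the missing axis, not 1.0.
        dds[2] = domain_right_edge[2] - domain_left_edge[2]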



https://bitbucket.org/yt_analysis/yt-3.0/changeset/28ca834d247d/
changeset:   28ca834d247d
branch:      yt
user:        ngoldbaum
date:        2012-09-20 19:58:50
summary:     Merging
affected #:  116 files

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.
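
Both styleguide idioms avoid allocating an intermediate array.  For
example:

    import numpy as np
    a = np.arange(12, dtype='float64')
    a.shape = (3, 4)         # reshape without creating a new array object
    np.multiply(a, 3, a)     # triple every element in place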


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
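
After the rename the module's public functions behave exactly as before.
A hypothetical call evaluating the profile on a small velocity grid (the
import path is assumed from this file's location in the tree):

    import numpy as np
    from yt.analysis_modules.absorption_spectrum.absorption_line import voigt

    a = 0.01                           # damping parameter
    u = np.linspace(-5.0, 5.0, 11)     # dimensionless frequency offsets
    phi = voigt(a, u)                  # profile values, peaked at u = 0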
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redsfhit.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redsfhit.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos of width box fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()
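
The save routine above reduces to stacking the per-slice 2d images into a single 3d array and hanging the metadata off the dataset as HDF5 attributes. A minimal sketch of that pattern with h5py alone; the file name, dataset name, and values are illustrative, not yt's API, though the 'redshifts' and 'observer_redshift' attribute names follow the code above:

    import h5py
    import numpy as np

    # Stand-ins for the accumulated projection stack and per-slice redshifts.
    projection_stack = [np.random.random((16, 16)) for _ in range(4)]
    redshifts = np.array([0.5, 0.4, 0.3, 0.2])

    with h5py.File("light_cone_demo.h5", "w") as output:
        # Collapse the list of 2d slices into one 3d array before writing.
        stack = np.array(projection_stack)
        dataset = output.create_dataset("Density_None", data=stack)
        # Metadata rides along as dataset attributes, one per quantity.
        dataset.attrs['redshifts'] = redshifts
        dataset.attrs['observer_redshift'] = 0.0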


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
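
common_volume itself is not shown in this diff, but its non-periodic core is simple: the overlap of two axis-aligned boxes is the product of the per-axis interval overlaps, and the periodic argument extends this by also testing wrapped images of one box. A toy non-periodic version, assuming cubes stored as (axis, [min, max]) arrays like the ones built above:

    import numpy as np

    def box_overlap(cube1, cube2):
        # Per-axis overlap of the [min, max] intervals, clipped at zero,
        # multiplied together to give the shared volume.
        lo = np.maximum(cube1[:, 0], cube2[:, 0])
        hi = np.minimum(cube1[:, 1], cube2[:, 1])
        return np.prod(np.clip(hi - lo, 0.0, None))

    a = np.array([[0.00, 0.50], [0.0, 0.5], [0.0, 1.0]])
    b = np.array([[0.25, 0.75], [0.0, 0.5], [0.0, 1.0]])
    print(box_overlap(a, b))   # 0.25 * 0.5 * 1.0 = 0.125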
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate length of shortest vector between to points in periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments
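
periodic_distance above is the minimum-image convention in a unit box: along each axis, take the shorter of the direct separation and the wrapped one. An equivalent standalone form of the same computation:

    import numpy as np

    def minimum_image_distance(a, b):
        # Points assumed to lie in the unit cube [0, 1)^3.
        d = np.abs(a - b)
        d = np.minimum(d, 1.0 - d)   # the wrapped separation may be shorter
        return np.sqrt((d * d).sum())

    p1 = np.array([0.05, 0.5, 0.5])
    p2 = np.array([0.95, 0.5, 0.5])
    print(minimum_image_distance(p1, p2))   # 0.1, wrapping through the boundary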


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # Check that there are at least 4 particles to form an ellipsoid.
         # We neglect to check whether the 4 particles lie in the same plane;
         # that is almost certain never to occur, and we will deal with it
         # later if it ever comes up.
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # It apparently happens sometimes that the NaNs are turned into infs,
         # which messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
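
The FOF setup in the last hunk turns the dimensionless link parameter into a physical linking length via the mean interparticle spacing, with a negative link passed through as an absolute length (the sign convention is inferred from the np.abs call above). That logic in isolation, with illustrative numbers:

    import numpy as np

    def fof_linking_length(link, volume, n_parts):
        # Positive link: a fraction of the mean interparticle spacing.
        if link > 0:
            avg_spacing = (float(volume) / n_parts) ** (1.0 / 3.0)
            return link * avg_spacing
        # Negative link: interpreted as an absolute length.
        return np.abs(link)

    print(fof_linking_length(0.2, 1.0, 128**3))    # 0.2 / 128 = 0.0015625
    print(fof_linking_length(-0.01, 1.0, 128**3))  # 0.01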


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,7 +25,7 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Setup the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region, but within one padding of the boundary,
         # and this will do it.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unf. this has to be a dict, because any task can have
@@ -449,10 +449,10 @@
         # as the full number of particles.
         # We can skip this the first two times around.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -502,9 +502,8 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] =np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the holes that are modulo mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     

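The corner test in the parallel HOP diff above computes distances with the
minimum-image convention, wrapping each component across the periodic
boundary before taking the norm. The same idea as a compact helper (name
hypothetical):

    import numpy as np

    def periodic_distance(a, b, period):
        # Component-wise separation, wrapped across the periodic box.
        d = np.fabs(np.asarray(a) - np.asarray(b))
        d = np.minimum(d, np.asarray(period) - d)
        return np.sqrt((d * d).sum())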

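Likewise, the center-of-mass accumulation in that diff sorts particles by
group ID, marks run boundaries with np.ediff1d, and sums each contiguous
slice. A self-contained sketch of that segmented reduction (helper name
hypothetical; array sizes kept consistent so the boolean mask matches its
target):

    import numpy as np

    def group_sums(values, group_ids):
        # Sort by group so each group occupies one contiguous run.
        order = group_ids.argsort()
        v = values[order]
        g = group_ids[order]
        # Boundaries sit wherever consecutive sorted group IDs differ.
        marks = np.arange(g.size - 1)[np.ediff1d(g) > 0] + 1
        marks = np.concatenate(([0], marks, [g.size]))
        out = {}
        for i, u in enumerate(np.unique(g)):
            out[u] = np.sum(v[marks[i]:marks[i + 1]], axis=0)
        return out
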
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) mass (Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1


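The sigma integration above repeats one kernel: the trapezoid rule with a
different width for every interval of a log-spaced grid. Isolated, it is
just the following (modern np.trapz(yvals, xvals) handles uneven spacing
and computes the same weighted sum, though the code keeps the explicit
form):

    import numpy as np

    def trapz_uneven(xvals, yvals):
        # Trapezoid rule with per-interval widths, matching the
        # areas/sum lines in the diff above.
        xdiffs = xvals[1:] - xvals[:-1]
        areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
        return np.sum(areas)
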
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendant fractions and ancestry fractions.
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me


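find_relative_parentage above reduces parentage to an overlap count on
particle IDs. The same two fractions as a standalone helper (name
hypothetical; IDs assumed to be NumPy arrays of unique particle indices):

    import numpy as np

    def parentage_fractions(parent_ids, child_ids):
        # Particles the two halos share, counted by unique particle index.
        overlap = np.intersect1d(parent_ids, child_ids).size
        of_child_from_parent = float(overlap) / child_ids.size
        of_parent_given_to_child = float(overlap) / parent_ids.size
        return of_child_from_parent, of_parent_given_to_child
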
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -174,7 +171,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
-            child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            child_points = np.array(child_points)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -400,7 +387,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This allows us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(np.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
+            else:
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(np.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
+        else:
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
-        child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
+        child_send = np.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
@@ -618,8 +620,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]

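Two pieces of the merger-tree diff above are worth isolating. First, the
kD-tree switch: the `period` keyword in kdtree.query comes from the bundled
yt.utilities.spatial fork; stock SciPy's cKDTree.query has no such
argument. A stock-SciPy sketch of the same lookup, without periodicity:

    import numpy as np
    from scipy.spatial import cKDTree

    child_points = np.random.random((100, 3))  # normalized halo positions
    kdtree = cKDTree(child_points, leafsize=10)
    query = np.array([0.5, 0.5, 0.5])
    dists, NNtags = kdtree.query(query, k=5)   # indices of 5 nearest halos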

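Second, the _match step, which is the heart of the tree: mutual membership
tests on sorted particle IDs. A minimal sketch (assume_unique holds because
particle indices are unique):

    import numpy as np

    def match_masks(parent_ids, child_ids):
        # Boolean masks of the particles each snapshot shares with the other.
        parent_in_child = np.in1d(parent_ids, child_ids, assume_unique=True)
        child_in_parent = np.in1d(child_ids, parent_ids, assume_unique=True)
        return parent_in_child, child_in_parent
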
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):


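The VirialFilter hunks above interpolate in log10 space after scanning the
overdensity profile from the outside in for the outermost pair of bins that
brackets the virial overdensity. That scan, isolated (helper name
hypothetical; it mirrors the loop in the diff):

    import numpy as np

    def bracket_virial_index(overDensity, virial_overdensity):
        # Walk inward from the outer edge; bin q falls below the
        # threshold while the bin just inside it (q - 1) stays above.
        for q in (np.arange(len(overDensity), 0, -1) - 1):
            if (overDensity[q] < virial_overdensity) and \
                    (overDensity[q - 1] >= virial_overdensity):
                return q - 1
        return None
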
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -684,7 +684,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
                                                                                  lazy_reader=True)
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -845,7 +845,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1076,7 +1076,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1171,7 +1171,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1179,7 +1179,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1207,7 +1207,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1215,7 +1215,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1300,17 +1300,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
 


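The MaxLocation hunk above is a common idiom in this codebase: a flat argmax index returned by the quantity machinery is mapped back to 3-D cell coordinates with np.unravel_index. A minimal standalone sketch of that mapping (the toy array is made up):

    import numpy as np

    # Stand-in for a grid field; the real code uses grid.ActiveDimensions.
    vel = np.random.random((4, 4, 4))
    flat_index = vel.argmax()                        # flat index, like maxi above
    cell = np.unravel_index(flat_index, vel.shape)   # (i, j, k) tuple
    assert vel[cell] == vel.max()
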
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --


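The start_index and edge computations in hierarchy_subset.py reduce a list of per-grid triples component-wise with np.min/np.max along axis=0. A small sketch with made-up indices:

    import numpy as np

    # Hypothetical per-grid global start indices, one triple per grid.
    start_indices = [np.array([4, 8, 2]), np.array([0, 6, 5]), np.array([3, 1, 9])]
    global_start = np.min(start_indices, axis=0).astype('int64')
    # -> array([0, 1, 2]): the component-wise minimum across grids
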
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modification.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]


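The clump ordering above sorts clumps by their minimum density via np.argsort and then reverses the result, so indexing an object array with the reversed order yields densest-floor-last. The same effect, on fabricated values:

    import numpy as np

    min_density = np.array([1e-26, 5e-24, 3e-25])   # per-clump density floors, made up
    order = np.argsort(min_density)[::-1]           # descending: argsort plus reversal
    # order -> array([1, 2, 0]); a clump array is then indexed as clumps[order]
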
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",


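coalesce_join_tree reduces pairs of touching contour ids down to connected groups; the yt implementation works over sorted int64 pairs. A toy union-find version of the same reduction (illustrative only, not the yt code):

    def coalesce(pairs):
        # Map every contour id to a root so touching contours share one label.
        parent = {}
        def find(a):
            parent.setdefault(a, a)
            while parent[a] != a:
                parent[a] = parent[parent[a]]   # path halving
                a = parent[a]
            return a
        for a, b in pairs:
            ra, rb = find(a), find(b)
            if ra != rb:
                parent[max(ra, rb)] = min(ra, rb)
        return dict((i, find(i)) for i in parent)

    # Contours 5, 6, 7 touch pairwise; 9 only touches itself.
    labels = coalesce([(5, 6), (6, 7), (9, 9)])
    # labels maps 5, 6, and 7 to root 5, and 9 to itself
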
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation: flips sel to select everything else
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]


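The _interpolate_value step above is linear interpolation in r between the two bracketing surfaces located with np.digitize. Stripped of the angular machinery, the core reduces to this (bins and surface values are made up):

    import numpy as np

    bins = np.linspace(0.1, 1.0, 10)       # surface radii
    surf_val = bins**2                     # hypothetical column density per surface
    r = np.array([0.35, 0.62])             # sample radii strictly inside the bins
    digi = np.digitize(r, bins)            # index of the outer bracketing surface
    in_r, out_r = bins[digi - 1], bins[digi]
    in_val, out_val = surf_val[digi - 1], surf_val[digi]
    val = (out_val - in_val) / (out_r - in_r) * (r - in_r) + in_val
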
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]


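_get_interpolator above integrates a slice of the emissivity table over [ev_min, ev_max] with np.trapz and takes log10 of the clipped result. The core of that reduction, on a made-up 1-D table (the real code integrates along the last axis of a 3-D table with a weighted spacing):

    import numpy as np

    ev_vals = np.logspace(0, 2, 50)        # hypothetical energy grid, 1-100 eV
    table = 1.0 / ev_vals                  # made-up emissivity per energy bin
    e_is, e_ie = np.digitize([5.0, 40.0], ev_vals)
    bin_val = np.trapz(table[e_is - 1:e_ie], ev_vals[e_is - 1:e_ie])
    log_val = np.log10(max(bin_val, 1e-80))   # clip to avoid log10(0)
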
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Scale the lower endpoint down slightly to prevent numerical issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if len(sub) == 0: return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.


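The star formation rate machinery above is digitize-then-accumulate: star creation times land in time bins, masses are summed per bin, and a cumulative sum gives the mass formed over time. A compact equivalent with fabricated star data:

    import numpy as np

    creation_time = np.array([0.10, 0.15, 0.40, 0.80, 0.82])   # code time, made up
    star_mass = np.array([1e5, 2e5, 5e4, 1e5, 3e5])            # Msun, made up
    time_bins = np.linspace(creation_time.min() * 0.99, 1.0, 5)
    inds = np.digitize(creation_time, time_bins) - 1
    mass_bins = np.zeros(time_bins.size, dtype='float64')
    for index in np.unique(inds):
        mass_bins[index] += star_mass[inds == index].sum()
    cum_mass_bins = np.cumsum(mass_bins)     # cumulative stellar mass over time
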
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -78,14 +78,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -153,7 +153,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -178,7 +178,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -211,7 +211,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -220,13 +220,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -246,11 +246,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -332,7 +332,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -340,7 +340,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + vertex*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
                     subgrid,hilbert_child,output,refined,levels,grids,level+1,
@@ -381,17 +381,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -402,7 +402,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -411,7 +411,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -437,12 +437,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and
@@ -450,41 +450,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until we are nwide cells span
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -497,14 +497,14 @@
         dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -525,8 +525,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -540,7 +540,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -570,7 +570,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -698,14 +698,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -722,14 +722,14 @@
     if scene_distance is  None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -745,11 +745,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -763,11 +763,11 @@
     use this to multiply two matrices, it will think that you're
     trying to multiply by a set of vectors and all hell will break
     loose."""    
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -775,14 +775,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -791,25 +791,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -826,7 +826,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the x (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -843,7 +843,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -851,19 +851,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -912,9 +912,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(angle),0.,np.sin(angle)]
+    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
     cameraset_ring['%02i'%angle]=(pos,vc)
             
 


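The camera-ring loop at the end of sunrise_exporter.py places cameras around a circle. A standalone sketch of the same ring that makes the '%02i' key format and the degree-to-radian conversion explicit (illustrative only):

    import collections
    import numpy as np

    cameraset = collections.OrderedDict()
    segments = 20
    for angle in np.linspace(0.0, 360.0, segments):
        rad = np.radians(angle)              # np.cos/np.sin expect radians
        pos = [np.cos(rad), 0.0, np.sin(rad)]
        vc = [np.cos(np.radians(90.0 - angle)), 0.0,
              np.sin(np.radians(90.0 - angle))]
        cameraset['%02i' % angle] = (pos, vc)
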
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Reflect so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is effectively what happens below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_type must be either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
         multi = 1
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, this finds the sum over all the
         elements leaving the dimension dim untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
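
A note on _dim_sum above: it collapses every axis except dim by summing one
axis at a time, walking from the highest axis down so the surviving axis
numbers are not shifted by each reduction. A minimal standalone sketch of the
same idea (independent of yt; later numpy releases also accept a tuple of
axes in sum()):

    import numpy as np

    def dim_sum(a, dim):
        # Sum out every axis except `dim`, highest axis first so the
        # indices of the axes still to be reduced remain valid.
        for ax in reversed(range(a.ndim)):
            if ax != dim:
                a = a.sum(axis=ax)
        return a

    # e.g. dim_sum(np.ones((2, 3, 4)), 1) -> array([ 8.,  8.,  8.])
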
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi
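
For reference, the pair generation above draws isotropic directions with the
usual inverse-CDF trick: phi is uniform on [0, 2*pi) and theta = arccos(2v - 1)
for v uniform on [0, 1), which samples the spherical area element
sin(theta) dtheta dphi uniformly. A minimal standalone sketch (hypothetical
name, independent of yt):

    import numpy as np

    def random_unit_vectors(size, rng=np.random):
        # phi uniform on [0, 2*pi); theta = arccos(2v - 1) makes the
        # direction distribution uniform over the sphere.
        phi = rng.uniform(0.0, 2.0 * np.pi, size)
        v = rng.uniform(0.0, 1.0, size)
        theta = np.arccos(2.0 * v - 1.0)
        vec = np.empty((size, 3), dtype='float64')
        vec[:, 0] = np.cos(phi) * np.sin(theta)
        vec[:, 1] = np.sin(phi) * np.sin(theta)
        vec[:, 2] = np.cos(theta)
        return vec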


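Likewise, _bin_results accumulates hits for a multi-dimensional PDF in a
flattened array by offsetting each dimension's digitize() result by the sizes
of the dimensions before it. A compact sketch of the same bookkeeping using
np.ravel_multi_index and np.bincount (hypothetical name; out-of-range rows are
dropped, as the too_low/too_high counters do above):

    import numpy as np

    def bin_hits(results, bin_edges):
        # results: (npairs, ndims); bin_edges: one 1D edge array per dim.
        # digitize() returns 0 below the first edge and len(edges) at or
        # above the last, so those rows are masked out before flattening.
        good = np.ones(results.shape[0], dtype=bool)
        idx = []
        for dim, edges in enumerate(bin_edges):
            digi = np.digitize(results[:, dim], edges)
            good &= (digi > 0) & (digi < edges.size)
            idx.append(digi - 1)
        shape = tuple(e.size - 1 for e in bin_edges)
        flat = np.ravel_multi_index([i[good] for i in idx], shape)
        return np.bincount(flat, minlength=int(np.prod(shape)))
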
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)
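
For context, the helper above builds a zero-filled record array from a dtype
descriptor by allocating one zeroed column per format and handing them to
numpy.core.records.fromarrays. A small usage sketch (the descriptor below is
hypothetical):

    import numpy as np
    import numpy.core.records as rec

    desc = {'names': ['id', 'mass'], 'formats': ['int64', 'float64']}
    blanks = [np.zeros(4, dtype=atype) for atype in desc['formats']]
    table = rec.fromarrays(blanks, **desc)
    # table['id'] and table['mass'] are zero-filled columns of length 4.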


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -29,7 +29,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -74,9 +74,9 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_grid_state(func):
     """
@@ -181,13 +181,13 @@
         if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tempx = na.abs(self['x'] - center[0])
-                tempx = na.minimum(tempx, self.DW[0] - tempx)
-                tempy = na.abs(self['y'] - center[1])
-                tempy = na.minimum(tempy, self.DW[1] - tempy)
-                tempz = na.abs(self['z'] - center[2])
-                tempz = na.minimum(tempz, self.DW[2] - tempz)
-                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
+                tempx = np.abs(self['x'] - center[0])
+                tempx = np.minimum(tempx, self.DW[0] - tempx)
+                tempy = np.abs(self['y'] - center[1])
+                tempy = np.minimum(tempy, self.DW[1] - tempy)
+                tempz = np.abs(self['z'] - center[2])
+                tempz = np.minimum(tempz, self.DW[2] - tempz)
+                tr = np.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.field_data[field]
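
The RadiusCode branch above is a minimum-image distance: on each axis it takes
the smaller of the direct separation and the wrap-around separation through
the periodic domain of width DW. The same rule as a standalone sketch
(hypothetical names):

    import numpy as np

    def periodic_radius(pos, center, domain_width):
        # pos: (N, 3); center, domain_width: (3,). Minimum-image convention.
        d = np.abs(pos - center)
        d = np.minimum(d, domain_width - d)
        return np.sqrt((d * d).sum(axis=-1))
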
@@ -235,14 +235,14 @@
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -250,7 +250,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -376,7 +376,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -421,11 +421,11 @@
         return grids
 
     def select_grid_indices(self, level):
-        return na.where(self.grid_levels == level)
+        return np.where(self.grid_levels == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
-            self.__grid_left_edge = na.array([g.LeftEdge for g in self._grids])
+            self.__grid_left_edge = np.array([g.LeftEdge for g in self._grids])
         return self.__grid_left_edge
 
     def __del_grid_left_edge(self):
@@ -441,7 +441,7 @@
 
     def __get_grid_right_edge(self):
         if self.__grid_right_edge == None:
-            self.__grid_right_edge = na.array([g.RightEdge for g in self._grids])
+            self.__grid_right_edge = np.array([g.RightEdge for g in self._grids])
         return self.__grid_right_edge
 
     def __del_grid_right_edge(self):
@@ -457,7 +457,7 @@
 
     def __get_grid_levels(self):
         if self.__grid_levels == None:
-            self.__grid_levels = na.array([g.Level for g in self._grids])
+            self.__grid_levels = np.array([g.Level for g in self._grids])
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +474,7 @@
 
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
-            self.__grid_dimensions = na.array([g.ActiveDimensions for g in self._grids])
+            self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
         return self.__grid_dimensions
 
     def __del_grid_dimensions(self):
@@ -516,13 +516,13 @@
             if field not in self.hierarchy.field_list and not in_grids:
                 if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
             if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
-                self._sortkey = na.argsort(self[self.sort_by])
+                self._sortkey = np.argsort(self[self.sort_by])
             # We *always* sort the field here if we have not successfully
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
@@ -581,7 +581,7 @@
 
     def _get_list_of_grids(self):
         # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+        y = np.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
                     & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
                     & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
                     & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
@@ -604,10 +604,10 @@
         else:
             sl = self._cut_masks[grid.id]
         if not iterable(grid[field]):
-            gf = grid[field] * na.ones(grid.child_mask[sl].shape)
+            gf = grid[field] * np.ones(grid.child_mask[sl].shape)
         else:
             gf = grid[field][sl]
-        return gf[na.where(grid.child_mask[sl])]
+        return gf[np.where(grid.child_mask[sl])]
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
@@ -646,10 +646,10 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
@@ -659,7 +659,7 @@
         # Get the value of the line at each LeftEdge and RightEdge
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        p = np.zeros(self.pf.h.num_grids, dtype='bool')
         # Check left faces first
         for i in range(3):
             i1 = (i+1) % 3
@@ -670,10 +670,10 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
+        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+                & np.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+                & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
     def _get_line_at_coord(self, v, index):
@@ -684,24 +684,24 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                        grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        self._dts[grid.id] = np.abs(dts)
+        self._ts[grid.id] = np.abs(ts)
         return mask
 
 class AMRStreamlineBase(AMR1DData):
@@ -745,11 +745,11 @@
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2] # the final segment reuses the previous length
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -760,14 +760,14 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
@@ -775,13 +775,13 @@
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1) 
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -842,8 +842,8 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = na.array([])
-            else: data = na.concatenate(data)
+            if len(data) == 0: data = np.array([])
+            else: data = np.concatenate(data)
             temp_data[field] = data
             # Now the next field can use this field
             self[field] = temp_data[field] 
@@ -891,7 +891,7 @@
 
         >>> proj = pf.h.proj(0, "Density")
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         if center is None:
             center = self.get_field_parameter("center")
@@ -944,11 +944,11 @@
         """
         import yt.utilities.delaunay as de
         if log_spacing:
-            zz = na.log10(self[field])
+            zz = np.log10(self[field])
         else:
             zz = self[field]
-        xi, yi = na.array( \
-                 na.mgrid[LE[0]:RE[0]:side*1j, \
+        xi, yi = np.array( \
+                 np.mgrid[LE[0]:RE[0]:side*1j, \
                           LE[1]:RE[1]:side*1j], 'float64')
         zi = de.Triangulation(self['px'],self['py']).nn_interpolator(zz)\
                  [LE[0]:RE[0]:side*1j, \
@@ -1082,7 +1082,7 @@
             points = None
             t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
-            points = na.concatenate(points)
+            points = np.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
@@ -1124,27 +1124,27 @@
         nx = grid.child_mask.shape[xaxis]
         ny = grid.child_mask.shape[yaxis]
         mask = self.__cut_mask_child_mask(grid)[sl]
-        cm = na.where(mask.ravel()== 1)
-        cmI = na.indices((nx,ny))
+        cm = np.where(mask.ravel()== 1)
+        cmI = np.indices((nx,ny))
         ind = cmI[0, :].ravel()   # xind
         npoints = cm[0].shape
         # create array of "npoints" ones that will be reused later
-        points = na.ones(npoints, 'float64')
+        points = np.ones(npoints, 'float64')
         # calculate xpoints array
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
         del cmI   # no longer needed 
-        t = na.vstack( (t, points * ind[cm] * dy + \
+        t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
         del ind, cm   # no longer needed
         # calculate zpoints array
-        t = na.vstack((t, points * self.coord))
+        t = np.vstack((t, points * self.coord))
         # calculate dx array
-        t = na.vstack((t, points * dx * 0.5))
+        t = np.vstack((t, points * dx * 0.5))
         # calculate dy array
-        t = na.vstack((t, points * dy * 0.5))
+        t = np.vstack((t, points * dy * 0.5))
         # return [xpoints, ypoints, zpoints, dx, dy] as (5, npoints) array
         return t.swapaxes(0, 1)
 
@@ -1169,7 +1169,7 @@
             dv = self.hierarchy.io._read_data_slice(grid, field, self.axis, sl_ind) * conv_factor
         else:
             dv = grid[field]
-            if dv.size == 1: dv = na.ones(grid.ActiveDimensions)*dv
+            if dv.size == 1: dv = np.ones(grid.ActiveDimensions)*dv
             dv = dv[sl]
         mask = self.__cut_mask_child_mask(grid)[sl]
         dataVals = dv.ravel()[mask.ravel() == 1]
@@ -1251,11 +1251,11 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
@@ -1276,7 +1276,7 @@
         # @todo: Convert to using corners
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+        vertices = np.array([[LE[:,0],LE[:,1],LE[:,2]],
                              [RE[:,0],RE[:,1],RE[:,2]],
                              [LE[:,0],LE[:,1],RE[:,2]],
                              [RE[:,0],RE[:,1],LE[:,2]],
@@ -1285,27 +1285,27 @@
                              [LE[:,0],RE[:,1],LE[:,2]],
                              [RE[:,0],LE[:,1],RE[:,2]]])
         # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
         self.D = D
         self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+            np.where(np.logical_not(np.all(D<0,axis=0) | np.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
-        D = na.ones(ss) * self._d
+        D = np.ones(ss) * self._d
         x = grid.LeftEdge[0] + grid.dds[0] * \
-                (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
         y = grid.LeftEdge[1] + grid.dds[1] * \
-                (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
         z = grid.LeftEdge[2] + grid.dds[2] * \
-                (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
-        cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
+        diag_dist = np.sqrt(np.sum(grid.dds**2.0))
+        cm = (np.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
 
     def _generate_coords(self):
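
The cutting-plane code above works on signed plane distances: with unit normal
n and offset d = -n . center, D = n . x + d is evaluated at the eight corners
of every grid, and a grid can straddle the plane only where D changes sign
across its corners; _get_cut_mask then keeps cells whose center lies within
half a cell diagonal of the plane. A condensed sketch of the per-cell test
(hypothetical names):

    import numpy as np

    def plane_cut_mask(cell_centers, normal, center, dds):
        # cell_centers: (..., 3) cell-center coordinates; normal: unit (3,);
        # dds: (3,) cell widths. Plane equation: n.x + d = 0.
        d = -np.dot(normal, center)
        D = np.tensordot(cell_centers, normal, axes=([-1], [0])) + d
        return np.abs(D) <= 0.5 * np.sqrt(np.sum(dds ** 2))
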
@@ -1313,12 +1313,12 @@
         for grid in self._get_grids():
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
-        else: points = na.concatenate(points)
+        else: points = np.concatenate(points)
         t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
-        self['px'] = na.dot(pos, self._x_vec)
-        self['py'] = na.dot(pos, self._y_vec)
-        self['pz'] = na.dot(pos, self._norm_vec)
+        self['px'] = np.dot(pos, self._x_vec)
+        self['py'] = np.dot(pos, self._y_vec)
+        self['pz'] = np.dot(pos, self._norm_vec)
         self['pdx'] = t[:,3] * 0.5
         self['pdy'] = t[:,3] * 0.5
         self['pdz'] = t[:,3] * 0.5
@@ -1326,14 +1326,14 @@
     def _generate_grid_coords(self, grid):
         pointI = self._get_point_indices(grid)
         coords = [grid[ax][pointI].ravel() for ax in 'xyz']
-        coords.append(na.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
-        return na.array(coords).swapaxes(0,1)
+        coords.append(np.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
+        return np.array(coords).swapaxes(0,1)
 
     def _get_data_from_grid(self, grid, field):
         if not self.pf.field_info[field].particle_type:
             pointI = self._get_point_indices(grid)
             if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions)
+                t = grid[field] * np.ones(grid.ActiveDimensions)
                 return t[pointI].ravel()
             return grid[field][pointI].ravel()
         else:
@@ -1344,10 +1344,10 @@
 
     @cache_point_indices
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _gen_node_name(self):
         cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
@@ -1391,7 +1391,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -1435,34 +1435,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
         
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is False:
             self._refresh_data()
@@ -1479,11 +1479,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -1491,12 +1491,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -1512,7 +1512,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -1538,27 +1538,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         self._refresh_data()
         return
@@ -1584,7 +1584,7 @@
                     continue # A "True" return means we did it
             if not self._vc_data.has_key(field):
                 self._vc_data[field] = {}
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
@@ -1686,9 +1686,9 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
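
The proj_style switch above only picks the along-axis reduction: "mip" keeps
the maximum through the volume, while "integrate" sums (scaled later by the
path length dl and any weight field). In sketch form:

    import numpy as np

    data = np.random.random((8, 8, 8))  # toy grid of field values
    mip = np.max(data, axis=0)          # maximum-intensity projection
    integrated = np.sum(data, axis=0)   # unweighted sum; times dl in practice
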
@@ -1743,7 +1743,7 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_dls(self, grid, fields):
@@ -1755,8 +1755,8 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
+        dls = np.array(dls)
+        convs = np.array(convs)
         if self.proj_style == "mip":
             dls[:] = 1.0
             convs[:] = 1.0
@@ -1822,14 +1822,14 @@
                 ds = gs[0].dds[0]
             else:
                 ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
+            dxs.append(np.ones(nvals.shape[0], dtype='float64') * ds)
+        coord_data = np.concatenate(coord_data, axis=0).transpose()
+        field_data = np.concatenate(field_data, axis=0).transpose()
         if self._weight is None:
             dls, convs = self._get_dls(self._grids[0], fields)
             field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        weight_data = np.concatenate(weight_data, axis=0).transpose()
+        dxs = np.concatenate(dxs, axis=0).transpose()
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = dxs
@@ -1843,7 +1843,7 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -1853,7 +1853,7 @@
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
         if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -1873,16 +1873,16 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            used_points = np.logical_or.reduce(used_data, self.axis)
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
+                      for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
+        to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 
@@ -1894,8 +1894,8 @@
         if len(grids_to_initialize) == 0: return
         pbar = get_pbar('Initializing tree % 2i / % 2i' \
                           % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
+        start_index = np.empty(2, dtype="int64")
+        dims = np.empty(2, dtype="int64")
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
         for pi, grid in enumerate(grids_to_initialize):
@@ -1920,7 +1920,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2024,7 +2024,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -2083,7 +2083,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -2112,12 +2112,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -2142,7 +2142,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -2169,8 +2169,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids cause this
                     # to round up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -2213,8 +2213,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
@@ -2225,10 +2225,10 @@
                 del self.__overlap_masks[grid.id]
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -2244,7 +2244,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -2260,7 +2260,7 @@
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -2278,18 +2278,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -2300,7 +2300,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2367,30 +2367,30 @@
         >>> print fproj["Density"]
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
 
     def _generate_coords(self):
@@ -2398,9 +2398,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
@@ -2422,7 +2422,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)
@@ -2483,10 +2483,10 @@
             if ( (i%100) == 0):
                 mylog.info("Working on % 7i / % 7i", i, len(self._grids))
             grid.set_field_parameter("center", self.center)
-            points.append((na.ones(
+            points.append((np.ones(
                 grid.ActiveDimensions,dtype='float64')*grid['dx'])\
                     [self._get_point_indices(grid)])
-            t = na.concatenate([t,points])
+            t = np.concatenate([t,points])
             del points
         self['dx'] = t
         #self['dy'] = t
@@ -2496,8 +2496,8 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
-        tr = na.array([grid['x'][pointI].ravel(), \
+        dx = np.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
+        tr = np.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
                 grid["RadiusCode"][pointI].ravel(),
@@ -2533,7 +2533,7 @@
                 if self._generate_field(field):
                     continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
@@ -2545,21 +2545,21 @@
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
             # int64 -> float64 with the first real set of data
-            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
-                return na.array([f[i,:][pointI] for i in range(3)])
+                return np.array([f[i,:][pointI] for i in range(3)])
             if self._is_fully_enclosed(grid): return grid[field].ravel()
             return grid[field][pointI].ravel()
         if field in self.pf.field_info and self.pf.field_info[field].vector_field:
             pointI = self._get_point_indices(grid)
             f = grid[field]
-            return na.array([f[i,:][pointI] for i in range(3)])
+            return np.array([f[i,:][pointI] for i in range(3)])
         else:
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
-                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+                tr = tr * np.ones(grid.ActiveDimensions, dtype='float64')
             if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
                 and self._is_fully_enclosed(grid):
                 return tr.ravel()
@@ -2579,19 +2579,19 @@
             if grid.has_key(field):
                 new_field = grid[field]
             else:
-                new_field = na.ones(grid.ActiveDimensions, dtype=dtype) * default_val
+                new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
             new_field[pointI] = self[field][i:i+np]
             grid[field] = new_field
             i += np
 
     def _is_fully_enclosed(self, grid):
-        return na.all(self._get_cut_mask)
+        return np.all(self._get_cut_mask)
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _get_cut_particle_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -2600,9 +2600,9 @@
         return self._get_cut_mask(fake_grid)
 
     def _get_particle_indices(self, grid):
-        k = na.zeros(grid.NumberOfParticles, dtype='bool')
+        k = np.zeros(grid.NumberOfParticles, dtype='bool')
         k = (k | self._get_cut_particle_mask(grid))
-        return na.where(k)
+        return np.where(k)
 
     def cut_region(self, field_cuts):
         """
@@ -2705,16 +2705,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -2818,7 +2818,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -2835,10 +2835,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -2867,7 +2867,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -2951,36 +2951,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
 
@@ -2992,16 +2992,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -3018,7 +3018,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -3056,7 +3056,7 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -3076,35 +3076,35 @@
         within the cylinder will be selected.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._refresh_data()
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3115,13 +3115,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
 
@@ -3138,8 +3138,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
@@ -3150,11 +3150,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -3172,7 +3172,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
             
 
@@ -3185,7 +3185,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge, 
                               grid.RightEdge, grid.dds, pm, 0)
@@ -3228,7 +3228,7 @@
                                                            self.right_edge)
 
     def _is_fully_enclosed(self, grid):
-        return na.all( (grid._corners <= self.right_edge)
+        return np.all( (grid._corners <= self.right_edge)
                      & (grid._corners >= self.left_edge))
 
     @cache_mask
@@ -3282,10 +3282,10 @@
 
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
         self._refresh_data()
-        self.offsets = (na.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
+        self.offsets = (np.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
                         (self.pf.domain_right_edge -
                          self.pf.domain_left_edge)[:,None,None,None])\
                        .transpose().reshape(27,3) # cached and in order
@@ -3300,7 +3300,7 @@
                            self.left_edge[1]+off_y,self.left_edge[2]+off_z]
             region_right = [self.right_edge[0]+off_x,
                             self.right_edge[1]+off_y,self.right_edge[2]+off_z]
-            if (na.all((grid._corners <= region_right) &
+            if (np.all((grid._corners <= region_right) &
                        (grid._corners >= region_left))):
                 return True
         return False
@@ -3310,7 +3310,7 @@
         if self._is_fully_enclosed(grid):
             return True
         else:
-            cm = na.zeros(grid.ActiveDimensions,dtype='bool')
+            cm = np.zeros(grid.ActiveDimensions,dtype='bool')
             dxp, dyp, dzp = self._dx_pad * grid.dds
             for off_x, off_y, off_z in self.offsets:
                 cm = cm | ( (grid['x'] - dxp + off_x < self.right_edge[0])
@@ -3350,7 +3350,7 @@
         Child cells are not returned.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._grids = na.array(grid_list)
+        self._grids = np.array(grid_list)
         self.grid_list = self._grids
 
     def _get_list_of_grids(self):
@@ -3361,13 +3361,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class AMRMaxLevelCollection(AMR3DData):
@@ -3394,13 +3394,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 
@@ -3441,14 +3441,14 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = na.abs(grid._corners - self.center)
-        r = na.minimum(r, na.abs(self.DW[None,:]-r))
-        corner_radius = na.sqrt((r**2.0).sum(axis=1))
-        return na.all(corner_radius <= self.radius)
+        r = np.abs(grid._corners - self.center)
+        r = np.minimum(r, np.abs(self.DW[None,:]-r))
+        corner_radius = np.sqrt((r**2.0).sum(axis=1))
+        return np.all(corner_radius <= self.radius)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3477,7 +3477,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -3488,12 +3488,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -3505,7 +3505,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -3535,7 +3535,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -3545,18 +3545,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # each vrdote#_2 takes the product of the vr components with e#,
         # squares the results,
         # takes the smallest of the three periodic cases,
         # and sums over the components
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
 
@@ -3572,21 +3572,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take into account non-cube root grid tiles
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
@@ -3627,22 +3627,22 @@
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        self.ActiveDimensions = np.array(dims,dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -3650,14 +3650,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields=None):
         if self._grids is None:
@@ -3677,7 +3677,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -3689,9 +3689,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
             
@@ -3737,7 +3737,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -3832,7 +3832,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -3846,35 +3846,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -3944,7 +3944,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
     
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -3969,7 +3969,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -3997,7 +3997,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -4054,13 +4054,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask

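[Editorial note: the boolean-region hunk just above combines per-region cut masks with in-place bitwise operations. For orientation, the three operators reduce to the following standalone sketch, with arbitrary random masks standing in for real region cut masks:

    import numpy as np

    # Two stand-in cut masks; in yt these come from _get_cut_mask(grid).
    a = np.random.random((8, 8, 8)) < 0.5
    b = np.random.random((8, 8, 8)) < 0.5

    m_and = a.copy()
    np.bitwise_and(m_and, b, m_and)              # "AND"
    m_not = a.copy()
    np.bitwise_and(m_not, np.invert(b), m_not)   # "NOT" is AND NOT
    m_or = a.copy()
    np.bitwise_or(m_or, b, m_or)                 # "OR"
    print(m_and.sum(), m_not.sum(), m_or.sum())

The third positional argument is the output array, so each combination happens in place without allocating a new mask.]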

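[Editorial note: stepping back, nearly every hunk in this file is the mechanical rename of the numpy alias from na to np. A throwaway script along these lines would reproduce the substitution; this is hypothetical, not necessarily how the change was actually made:

    import re
    import sys

    IMPORT = re.compile(r'^import numpy as na\b', re.M)
    ALIAS = re.compile(r'\bna\.')   # na.array, na.where, na.rint, ...

    def rename_alias(path):
        # Rewrite the import line first, then every attribute access.
        with open(path) as f:
            src = f.read()
        src = IMPORT.sub('import numpy as np', src)
        src = ALIAS.sub('np.', src)
        with open(path, 'w') as f:
            f.write(src)

    for path in sys.argv[1:]:
        rename_alias(path)

The word boundary on na. is only a rough guard; a rename like this still wants a follow-up grep for docstrings, comments, and unrelated identifiers that happen to end in na.]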
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -100,7 +100,7 @@
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): self.retvals[i].append(rv[i])
             g.clear_data()
-        self.retvals = [na.array(self.retvals[i]) for i in range(self.n_ret)]
+        self.retvals = [np.array(self.retvals[i]) for i in range(self.n_ret)]
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
@@ -110,7 +110,7 @@
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
@@ -185,7 +185,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
 
@@ -218,7 +218,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -249,9 +249,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -268,17 +268,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -292,11 +292,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -360,15 +360,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -380,7 +380,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -402,26 +402,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Snap the coverage to the root grid cell left
         # edges so the indices line up.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -429,12 +429,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -447,9 +447,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = na.array([local_data["CellMass"][sel]], order='F')
+	    vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -484,7 +484,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -492,7 +492,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -569,7 +569,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -613,9 +613,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -644,14 +644,14 @@
     """
     ma, maxi, mx, my, mz, mg = -1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
         mg = data["GridIndices"][maxi]
     return (ma, maxi, mx, my, mz, mg)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 6)
@@ -663,14 +663,14 @@
     """
     ma, mini, mx, my, mz, mg = 1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
         mg = data["GridIndices"][mini]
     return (ma, mini, mx, my, mz, mg)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 6)
@@ -691,8 +691,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)


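[Editorial note: the derived quantities above all follow the same two-stage split: a per-chunk function returns partial reductions, and a combine function merges them across processors. A minimal self-contained sketch of the CenterOfMass pair follows, with illustrative names rather than the yt API:

    import numpy as np

    def center_of_mass_partial(x, y, z, m):
        # Per-chunk piece: mass-weighted coordinate sums plus total mass.
        return (x * m).sum(), (y * m).sum(), (z * m).sum(), m.sum()

    def center_of_mass_combine(partials):
        # Combine piece: sum the partials, divide once at the end.
        xs, ys, zs, den = (np.array(col) for col in zip(*partials))
        return np.array([xs.sum(), ys.sum(), zs.sum()]) / den.sum()

    chunks = [tuple(np.random.random(100) for _ in range(4)) for _ in range(3)]
    partials = [center_of_mass_partial(*c) for c in chunks]
    print(center_of_mass_combine(partials))

Deferring the division to the combine step is what makes the reduction safe to split across chunks or processors.]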
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -151,8 +151,8 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -161,8 +161,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -180,12 +180,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -215,13 +215,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
-            return na.random.random(3) * 1e-2
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0


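[Editorial note: the FieldDetector changes keep the same trick in np form: a defaultdict whose factory hands back near-unity random arrays, so any field a derived-field definition asks for appears to exist with plausible values. A reduced sketch of just that factory:

    import numpy as np
    from collections import defaultdict

    nd = 16
    fake_grid = defaultdict(
        lambda: np.ones((nd, nd, nd), dtype='float64')
              + 1e-4 * np.random.random((nd, nd, nd)))

    rho = fake_grid['Density']     # created on first access
    print(rho.shape, rho.mean())   # (16, 16, 16), roughly 1.0

The 1e-4 jitter keeps the generated values non-constant while staying safely positive.]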
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -79,11 +79,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -184,15 +184,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -209,14 +209,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -237,9 +237,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -278,19 +278,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -369,8 +369,8 @@
     def __fill_child_mask(self, child, mask, tofill):
         rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -383,7 +383,7 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'int32')
+        self._child_mask = np.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
@@ -398,7 +398,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -425,8 +425,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -462,7 +462,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -474,9 +474,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -486,17 +486,17 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
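
The eight shifted adds in get_vertex_centered_data above form a cell-to-vertex average: each interior vertex accumulates its eight neighboring cell-centered values and the sum is scaled by 0.125, with the log-space round trip and boundary extrapolation layered on top. A standalone sketch of just that stencil, using a hypothetical cell_to_vertex helper rather than the yt method:

    import numpy as np

    def cell_to_vertex(cc):
        # cc: cell-centered values, shape (nx, ny, nz);
        # returns vertex-centered values, shape (nx+1, ny+1, nz+1)
        vv = np.zeros(np.asarray(cc.shape) + 1, dtype='float64')
        lo, hi = slice(None, -1), slice(1, None)
        for i in (lo, hi):
            for j in (lo, hi):
                for k in (lo, hi):
                    vv[i, j, k] += cc   # one of the 8 corner offsets
        vv *= 0.125                     # interior vertices touch 8 cells
        return vv                       # boundaries still need extrapolation

    vc = cell_to_vertex(np.random.random((4, 4, 4)))  # shape (5, 5, 5)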


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle, pdb
 import weakref
 
@@ -116,11 +116,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _setup_classes(self, dd):
         # Called by subclass
@@ -172,7 +172,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -361,13 +361,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
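
The level_stats hunk above is plain boolean-mask bookkeeping: per level, grids are counted with a sum over an equality mask, and cells by multiplying out the matching rows of grid_dimensions. A standalone sketch with made-up, 1D-flattened arrays (yt's grid_levels is actually shaped (num_grids, 1)):

    import numpy as np

    grid_levels = np.array([0, 0, 1, 1, 1, 2])       # one level per grid
    grid_dimensions = np.array([[16, 16, 16]] * 6)   # (num_grids, 3)

    stats = {}
    for level in range(grid_levels.max() + 1):
        li = (grid_levels == level)
        stats[level] = (li.sum(),                                   # grid count
                        grid_dimensions[li, :].prod(axis=1).sum())  # cell count
    # stats -> {0: (2, 8192), 1: (3, 12288), 2: (1, 4096)}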


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -70,18 +70,18 @@
         max_val, maxi, mx, my, mz, mg = \
             source.quantities["MaxLocation"]( field, lazy_reader=True)
         max_grid = self.grids[mg]
-        mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
+        mc = np.unravel_index(maxi, max_grid.ActiveDimensions)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
               max_val, mx, my, mz, max_grid, max_grid.Level, mc)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_grid, mc, max_val, na.array((mx,my,mz), dtype='float64')
+        return max_grid, mc, max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -90,7 +90,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -103,11 +103,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -134,7 +134,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -151,15 +151,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -167,29 +167,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -203,26 +203,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -237,5 +237,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
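
The chains of np.choose calls above act as an in-place logical AND on a float mask: each call zeroes the grids whose edge lies on the wrong side of the point, so only grids that bracket the coordinate on every axis survive. A standalone sketch with hypothetical two-grid edge arrays, plus the equivalent boolean form:

    import numpy as np

    grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = (0.25, 0.25, 0.25)

    mask = np.ones(2)
    for i in range(3):
        # keep grids with left edge <= coord[i] < right edge
        np.choose(np.greater(grid_left_edge[:, i], coord[i]), (mask, 0), mask)
        np.choose(np.greater(grid_right_edge[:, i], coord[i]), (0, mask), mask)
    ind = np.where(mask == 1)   # -> (array([0]),): only grid 0 contains coord

    # same selection, written as one boolean expression
    inside = np.all((grid_left_edge <= coord) & (grid_right_edge > coord), axis=1)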
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
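
np.fromiter, used above to build conv_factors, fills a typed array directly from a generator; passing count lets it allocate the result once instead of growing it. A tiny standalone example with a stand-in converter in place of particle_convert:

    import numpy as np

    grid_list = [0, 1, 2, 3]        # stand-in for a list of grids
    func = lambda g: 2.0 * g        # stand-in conversion function

    factors = np.fromiter((func(g) for g in grid_list),
                          count=len(grid_list), dtype='float64')
    # factors -> array([0., 2., 4., 6.])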
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -115,13 +115,13 @@
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = np.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -131,7 +131,7 @@
         for key in self.__data:
             my_mean[key] = self._get_empty_field()
             my_weight[key] = self._get_empty_field()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for key in self.__data:
             my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
             my_weight[key][ub] = self.__weight_data[key][ub]
@@ -151,7 +151,7 @@
                                          accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
-                q[u] = na.sqrt(q[u] / w[u])
+                q[u] = np.sqrt(q[u] / w[u])
             self[field] = f
             self["%s_std" % field] = q
         self["UsedBins"] = u
@@ -202,7 +202,7 @@
                 else:
                     pointI = self._data_source._get_point_indices(source)
             data.append(source[field][pointI].ravel().astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -235,10 +235,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -253,7 +253,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -263,7 +263,7 @@
         # (i.e., lazy_reader)
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -282,7 +282,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -293,7 +293,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -301,9 +301,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -319,7 +319,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -337,11 +337,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -409,18 +409,18 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -428,7 +428,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -436,7 +436,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -456,9 +456,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -470,9 +470,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -481,11 +481,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -507,8 +507,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -531,7 +531,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -540,7 +540,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -579,7 +579,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -599,7 +599,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -617,9 +617,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -656,24 +656,24 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -681,7 +681,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -689,9 +689,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -711,11 +711,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -727,7 +727,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -741,13 +741,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -772,9 +772,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -853,7 +853,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
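
All three profile classes above reduce to the same np.digitize pattern: digitize values against the bin edges, then either clamp strays into the outermost bins (end_collect) or mask them away. A minimal sketch of that pattern with 0-based bin indices (the 1D and 2D/3D paths above keep slightly different index conventions):

    import numpy as np

    edges = np.linspace(0.0, 1.0, 6)       # 6 edges -> 5 bins
    data  = np.array([-0.2, 0.05, 0.5, 0.99, 1.7])

    idx = np.digitize(data, edges)         # 0 = below range, 6 = above

    # end_collect-style: clamp out-of-range values into the first/last bin
    clamped = np.clip(idx, 1, len(edges) - 1) - 1    # -> [0, 0, 2, 4, 4]

    # otherwise: keep only strictly in-range values
    keep = (data > edges.min()) & (data < edges.max())
    inside = np.digitize(data[keep], edges) - 1      # -> [0, 2, 4]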
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,7 +26,7 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
@@ -61,66 +61,66 @@
 
 def _dx(field, data):
     return data.dds[0]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
     return data.dds[1]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
     return data.dds[2]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           validators=[ValidateSpatial(0)],
           projection_conversion="unitary",
@@ -130,7 +130,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -139,7 +139,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -157,7 +157,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -169,7 +169,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -181,7 +181,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
              (data["y-velocity"]-bulk_velocity[1])**2.0 + \
              (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -189,13 +189,13 @@
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
 def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
+    return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
 add_field("TangentialVelocity", 
           function=_TangentialVelocity,
@@ -223,14 +223,14 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
     ## The spherical coordinates radius is simply the magnitude of the
     ## coords vector.
 
-    return na.sqrt(na.sum(coords**2,axis=-1))
+    return np.sqrt(np.sum(coords**2,axis=-1))
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,7 +245,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -254,11 +254,11 @@
     ## vector.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JdotCoords = na.sum(J*coords,axis=-1)
+    JdotCoords = np.sum(J*coords,axis=-1)
     
-    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,7 +269,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
     
@@ -282,18 +282,18 @@
     ## The angle is then given by the arctan of the ratio of the
     ## yprime-component and the xprime-component of the coords vector.
 
-    xprime = na.cross([0.0,1.0,0.0],normal)
-    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
-    yprime = na.cross(normal,xprime)
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = na.tile(xprime,tile_shape)
-    Jy = na.tile(yprime,tile_shape)
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
     
-    Px = na.sum(Jx*coords,axis=-1)
-    Py = na.sum(Jy*coords,axis=-1)
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
     
-    return na.arctan2(Py,Px)
+    return np.arctan2(Py,Px)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -305,7 +305,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -313,10 +313,10 @@
     ## gives a vector of magnitude equal to the cylindrical radius.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JcrossCoords = na.cross(J,coords)
-    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -331,7 +331,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -339,9 +339,9 @@
     ## the cylindrical height.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    return na.sum(J*coords,axis=-1)  
+    return np.sum(J*coords,axis=-1)  
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -399,7 +399,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -502,7 +502,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -520,7 +520,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -579,15 +579,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Now some fancy footwork
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -615,7 +615,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
@@ -627,12 +627,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -642,7 +642,7 @@
 def obtain_velocities(data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["x-velocity"] - bv[0]
     yv = data["y-velocity"] - bv[1]
     zv = data["z-velocity"] - bv[2]
@@ -694,18 +694,18 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
@@ -720,7 +720,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -730,7 +730,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -740,7 +740,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -788,20 +788,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
@@ -886,16 +886,16 @@
     center = data.get_field_parameter("center")
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
                 + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
                 + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
                 )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
+    if np.any(np.isnan(new_field)): # to fix center = point
+        new_field[np.isnan(new_field)] = 0.0
     return new_field
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
     return 1e-5
 add_field("RadialVelocity", function=_RadialVelocity,
@@ -916,10 +916,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -929,10 +929,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -955,16 +955,16 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
@@ -993,7 +993,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -1018,7 +1018,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -1038,7 +1038,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -1053,7 +1053,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -1068,7 +1068,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -1083,7 +1083,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -1100,7 +1100,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -1115,7 +1115,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -1130,7 +1130,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1145,7 +1145,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1171,7 +1171,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1189,7 +1189,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1207,7 +1207,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1225,7 +1225,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1244,7 +1244,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1263,7 +1263,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1285,13 +1285,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1299,7 +1299,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1311,7 +1311,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1344,7 +1344,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1369,13 +1369,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1383,7 +1383,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1396,7 +1396,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -106,7 +106,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -120,10 +120,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -141,7 +141,7 @@
         #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
         
@@ -180,9 +180,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -192,10 +192,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -224,8 +224,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
@@ -234,7 +234,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -260,15 +260,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -298,8 +298,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -345,14 +345,14 @@
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    props*np.array(correction).astype('int64')))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -372,7 +372,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -388,13 +388,13 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -461,17 +461,17 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
             if type(self.pf.grid_particles) == type(5):
                 particle_level = min(self.pf.max_level,self.pf.grid_particles)
             else:
                 particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    np.zeros(len(pos[:,0])).astype('int32')-1,
                     particle_level, #don't grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
@@ -500,10 +500,10 @@
             
 
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -519,7 +519,7 @@
         self.max_level = self.grid_levels.max()
 
     # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     mask = np.empty(self.grids.size, dtype='int32')
     #     pb = get_pbar("Populating grids", len(self.grids))
     #     for gi,g in enumerate(self.grids):
     #         pb.update(gi)
@@ -609,7 +609,7 @@
         self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -685,7 +685,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -696,7 +696,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -730,8 +730,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -812,10 +812,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -824,7 +824,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -836,7 +836,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -857,10 +857,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -927,8 +927,8 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()
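
Most of these changesets are the mechanical rename of yt's NumPy alias
from na to np, visible at the top of this file as the switch from
"import numpy as na" to "import numpy as np". A hedged sketch of how such
a rename could be scripted; this is illustrative only, not the tool used
for these commits, and as the enzo/data_structures.py hunk further down
shows, local variables named np (there renamed to npart) must be dealt
with first so the new alias is not shadowed:

    import re
    import sys

    def rename_numpy_alias(text):
        # Rewrite the import line, then every attribute access on the
        # alias; commented-out code is rewritten too, as in these diffs.
        text = re.sub(r'\bimport numpy as na\b', 'import numpy as np', text)
        return re.sub(r'\bna\.', 'np.', text)

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path) as f:
                src = f.read()
            with open(path, "w") as f:
                f.write(rename_numpy_alias(src))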


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,7 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -178,7 +178,7 @@
     di = dd==0.0
     #dd[di] = -1.0
     tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
+    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
     #    import pdb;pdb.set_trace()
     tr /= data.pf.conversion_factors["GasEnergy"]
@@ -186,7 +186,7 @@
     tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     x = data.pf.conversion_factors["Temperature"]
@@ -258,9 +258,9 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     tr  = data["Ones"] #create a grid in the right size
-    if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+    if np.sum(idx)>0:
+        tr /= np.prod(tr.shape) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contained mass
         return tr
     else:
         return tr*0.0
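
The field at the end of this hunk spreads the total mass of the selected
particles uniformly over the grid's cells rather than depositing it cell
by cell. A minimal sketch of that approximation, with hypothetical inputs
(a grid shape, a particle_mass array, and a boolean selection mask):

    import numpy as np

    def uniform_particle_density(grid_shape, particle_mass, selection):
        tr = np.ones(grid_shape, dtype='float64')
        if np.sum(selection) > 0:
            tr /= np.prod(tr.shape)                  # one part per cell
            tr *= np.sum(particle_mass[selection])   # total contained mass
            return tr
        return tr * 0.0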


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = np.array(grid.particle_indices)
         if field == 'particle_index':
-            return na.array(idx)
+            return np.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -168,10 +168,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -198,9 +198,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -242,20 +242,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -272,12 +272,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -294,7 +294,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -309,9 +309,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
@@ -332,8 +332,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -342,9 +342,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -354,12 +354,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -372,8 +372,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
@@ -406,14 +406,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -423,9 +423,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -450,14 +450,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): 
@@ -465,14 +465,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
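
The conversion helpers ending this file turn the code's hydro time
variable into proper time: quad() integrates with a trapezoid rule on
log-spaced sample points, find_root() inverts monotone functions by
bisection, and b2t() interpolates over a table of ages. A compact sketch
of the same quadrature idea, passing the sample points to np.trapz
directly and using the Om0/Oml0/h defaults visible in the diff (the
function names here are illustrative):

    import numpy as np

    def quad_logspace(f, xmin, xmax, n=10000):
        # Trapezoid rule on log-spaced abscissae, as in quad() above.
        x = np.logspace(np.log10(xmin), np.log10(xmax), n)
        return np.trapz(f(x), x)

    def age_at_expansion(a, Om0=0.27, Oml0=0.73, h=0.700):
        # Proper time in Gyr at expansion factor a, mirroring a2t().
        integrand = lambda x: 1.0 / (x * np.sqrt(Oml0 + Om0 * x**-3.0))
        return quad_logspace(integrand, 1e-4, a) * 9.779 / h

    # age_at_expansion(1.0) comes out near 13.9 Gyr for these parameters.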
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -109,7 +109,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -174,12 +174,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +187,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +273,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +296,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +311,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +347,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +367,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +384,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +405,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +424,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +439,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +473,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +620,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file
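
The reader above extracts one field from a FAB by seeking past the
preceding fields and then undoing the Fortran (column-major) on-disk
layout: reshape with the dimensions reversed, then swap axes 0 and 2 so
the array indexes as (x, y, z). A minimal sketch of that layout
conversion (the argument names and dtype are hypothetical):

    import numpy as np

    def read_fortran_field(f, n_elements, dims, dtype='>f8'):
        field = np.fromfile(f, count=n_elements, dtype=dtype)
        # Reversing dims and swapping the first and last axes is
        # equivalent to reshape(dims, order='F').
        return field.reshape(dims[::-1]).swapaxes(0, 2)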


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,10 +81,10 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -97,7 +97,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -137,18 +137,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py                                                                                                             
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
@@ -182,8 +182,8 @@
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,9 +193,9 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = na.array(self.grids, dtype='object')
+#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -212,7 +212,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -316,21 +316,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod
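
The three helpers just above recover the domain geometry from the level-0
prob_domain attribute: its first three entries are the low cell index,
the next three the high index, and scaling by dx (with one extra cell on
the high side) gives the physical edges. A sketch of the same bookkeeping
done in a single pass (file layout as in the diff; the function name is
illustrative):

    import h5py
    import numpy as np

    def chombo_domain(filename):
        with h5py.File(filename, 'r') as fileh:
            dx0 = fileh['/level_0'].attrs['dx']
            box = np.array(list(fileh['/level_0'].attrs['prob_domain']))
        left_index, right_index = box[0:3], box[3:] + 1
        # Left edge, right edge, and root-grid dimensions.
        return dx0 * left_index, dx0 * right_index, right_index - left_index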


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -131,7 +131,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


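[Note: The chombo/io.py hunk above keeps a particle only when its position lies inside the grid's bounding box, left-exclusive and right-inclusive on every axis. The same containment test can be applied to all particles at once; a sketch with hypothetical positions and edges:

    import numpy as np

    coords = np.random.random((100, 3))        # hypothetical particle positions
    left_edge = np.array([0.25, 0.25, 0.25])   # hypothetical grid edges
    right_edge = np.array([0.75, 0.75, 0.75])

    # Same test as _read_particles, vectorized over all particles:
    inside = np.all((left_edge < coords) & (coords <= right_edge), axis=1)
    selected = coords[inside]
]
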
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
@@ -90,7 +90,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +179,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(AMRHierarchy):
@@ -291,7 +291,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
@@ -304,29 +304,29 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        self._fill_arrays(ei, si, LE, RE, npart)
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
 
     def __pointer_handler(self, m):
         sgi = int(m[2])-1
@@ -379,7 +379,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -408,9 +408,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -425,7 +425,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -477,20 +477,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -518,7 +518,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -527,7 +527,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -590,7 +590,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -601,7 +601,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -638,35 +638,35 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,1:] = 0.0
         self.grid_right_edge[:,1:] = 1.0
         self.grid_dimensions[:,1:] = 1
 
 class EnzoHierarchy2D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,2] = 0.0
         self.grid_right_edge[:,2] = 1.0
         self.grid_dimensions[:,2] = 1
@@ -702,39 +702,22 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            np.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            np.concatenate([self.domain_right_edge, [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -827,7 +810,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
@@ -842,17 +825,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -870,6 +853,11 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -937,7 +925,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -949,8 +937,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -990,7 +978,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -998,7 +986,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():

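[Note: Two non-mechanical changes ride along in enzo/data_structures.py above: the rates.out / cool_rates.out table loading is dropped from the constructor, and the dispatch to _setup_1d / _setup_2d moves out of __init__ to the end of parameter parsing, where self.dimensionality is actually known. _setup_2d also now pads the already-parsed domain_left_edge / domain_right_edge attributes instead of re-reading the raw "DomainLeftEdge" / "DomainRightEdge" parameters. A sketch of that padding, with hypothetical 2D values:

    import numpy as np

    domain_left_edge = np.array([0.0, 0.0])
    domain_right_edge = np.array([1.0, 1.0])
    # Pad the missing axis out to 3D, as _setup_2d does:
    domain_left_edge = np.concatenate([domain_left_edge, [0.0]])
    domain_right_edge = np.concatenate([domain_right_edge, [1.0]])
]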

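[Note: The rename of the local particle-count list from np to npart in _parse_hierarchy and the _fill_arrays methods is not cosmetic: once the module itself is imported as np, a local of the same name shadows the alias and breaks every later np.* call in that scope. A minimal reproduction of the pitfall (hypothetical functions):

    import numpy as np

    def parse_counts_broken(tokens):
        np = []                      # local list shadows the numpy alias ...
        for t in tokens:
            np.append(int(t))
        return np.array(np)          # ... so this raises AttributeError

    def parse_counts(tokens):
        npart = []                   # distinct name keeps np.* usable
        for t in tokens:
            npart.append(int(t))
        return np.array(npart, dtype='int64')
]
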
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -193,7 +193,7 @@
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -249,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -324,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
@@ -367,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -406,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -466,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -485,7 +485,7 @@
 def _Bmag(field, data):
     """ magnitude of bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
 add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
@@ -495,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -555,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -584,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -606,11 +606,10 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #
@@ -638,7 +637,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)


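[Note: The weighted particle- and star-field hunks above all follow one deposition pattern: deposit value*mass into a "top" buffer and mass alone into a "bottom" buffer using yt's Cython cloud-in-cell kernel (amr_utils.CICDeposit_3), then divide where bottom is nonzero to get a mass-weighted cell average. As a rough stand-in for that kernel, a nearest-grid-point sketch (NGP, not CIC; all names hypothetical) shows the same weighting logic:

    import numpy as np

    def weighted_deposit(pos, values, mass, left_edge, dims, dx):
        # NGP cell index per particle; stand-in for CICDeposit_3.
        idx = ((pos - left_edge) / dx).astype('int64')
        idx = np.clip(idx, 0, np.asarray(dims) - 1)
        flat = np.ravel_multi_index(idx.T, dims)
        top = np.zeros(np.prod(dims), dtype='float64')
        bottom = np.zeros(np.prod(dims), dtype='float64')
        np.add.at(top, flat, values * mass)      # weighted sum per cell
        np.add.at(bottom, flat, mass)            # total mass per cell
        out = np.zeros(np.prod(dims), dtype='float64')
        bnz = bottom.nonzero()
        out[bnz] = top[bnz] / bottom[bnz]        # mass-weighted average
        return out.reshape(dims)
]
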
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['Refineby'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
 


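[Note: The time-range selection in simulation_handling.py digitizes the requested initial and final times against the sorted array of output times to get a slice of outputs, nudging the left index down when the initial time lands exactly on an output. A worked sketch with hypothetical times:

    import numpy as np

    my_times = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # sorted output times
    my_initial_time, my_final_time = 1.0, 3.2
    my_indices = np.digitize([my_initial_time, my_final_time], my_times)
    if my_initial_time == my_times[my_indices[0] - 1]:
        my_indices[0] -= 1                            # include the exact match
    selected = my_times[my_indices[0]:my_indices[1]]  # -> [1.0, 2.0, 3.0]
]
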
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.funcs import *
@@ -42,7 +42,7 @@
 from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -70,7 +70,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -123,36 +123,39 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
@@ -184,11 +187,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
@@ -203,6 +211,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
@@ -364,9 +373,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -392,7 +401,7 @@
         nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:


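[Note: The FLASH hierarchy fix above computes per-level cell widths from the domain width and snaps each grid's stored edges onto an integer number of cells at its level, but now only along the first ND dimensions (ND is defined outside the excerpt, presumably the dataset dimensionality), so 1D and 2D datasets no longer touch the padded axes. A sketch of the snapping step with hypothetical values:

    import numpy as np

    ND, refine_by, level = 2, 2, 3
    rdx = np.array([1.0, 1.0, 1.0]) / np.array([8, 8, 1])  # level-0 cell width
    dx = rdx.copy()
    dx[:ND] = rdx[:ND] / refine_by**level                  # refine active axes only

    left_edge = np.array([0.12500001, 0.25, 0.0])          # slightly off-lattice
    left_edge[:ND] = np.rint(left_edge[:ND] / dx[:ND]) * dx[:ND]
]
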
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -54,7 +54,7 @@
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]


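[Note: flash/io.py slices each grid's particles out of one flat on-disk table using pf.h._particle_indices, the prefix sum of per-grid particle counts built with np.add.accumulate in the data_structures.py hunk above. A sketch of that offset table, with hypothetical counts:

    import numpy as np

    counts = np.array([3, 0, 5, 2], dtype='int64')       # particles per grid
    offsets = np.zeros(len(counts) + 1, dtype='int64')
    np.add.accumulate(counts, out=offsets[1:])           # [0, 3, 3, 8, 10]

    gid = 2                                              # 0-based grid index
    start, end = offsets[gid], offsets[gid + 1]          # particles [3:8]
]
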
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(len(args), dtype='object')
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -71,7 +71,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +108,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +125,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -147,7 +147,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]


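[Note: The GDF hierarchy above refines cell widths per grid only along "active" dimensions, i.e. axes where some grid or the domain spans more than one cell; padded axes keep their coarse width. A sketch of the mask from the hunk, with hypothetical shapes:

    import numpy as np

    gdims = np.array([[8, 8, 1], [16, 16, 1]])           # per-grid dimensions
    domain_dimensions = np.array([8, 8, 1])
    active_dims = ~((np.max(gdims, axis=0) == 1) &
                    (domain_dimensions == 1))             # [True, True, False]

    refine_by, level = 2, 1
    dx = np.array([1 / 8.0, 1 / 8.0, 1.0])
    dx[active_dims] = dx[active_dims] / refine_by**level  # refine x and y only
]
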
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -110,7 +110,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -170,9 +170,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +181,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +259,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +304,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +325,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +354,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +367,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +381,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +494,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0


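[Note: Maestro's BoxLib headers give grid extents as inclusive index ranges, so __calculate_grid_dimensions converts them to cell counts with stop - start + 1. A tiny worked example of that off-by-one, with hypothetical indices:

    import numpy as np

    start = np.array([int(t) for t in "0,0,0".split(',')])
    stop = np.array([int(t) for t in "7,7,7".split(',')])
    dimension = stop - start + 1        # inclusive range -> 8 cells per axis
]
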
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


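[Note: maestro/io.py reads nElements reals from the FAB with np.fromfile, then converts the Fortran-ordered buffer into a C-ordered array by reshaping with the dimensions reversed and swapping axes 0 and 2. A reshape with order='F' is equivalent; a sketch with a stand-in buffer and hypothetical dimensions:

    import numpy as np

    dims = (4, 3, 2)                                 # hypothetical ActiveDimensions
    raw = np.arange(np.prod(dims), dtype='float64')  # stand-in for np.fromfile(...)

    field = raw.reshape(dims[::-1]).swapaxes(0, 2)   # as in _read_data_set above
    assert field.shape == dims

    field_f = raw.reshape(dims, order='F')           # equivalent single step
    assert np.array_equal(field, field_f)
]
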
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -108,7 +108,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -172,20 +172,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +269,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +290,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +304,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +341,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +361,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +378,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +398,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +415,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +444,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +464,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +607,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
 


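A minimal sketch (header rows invented) of the np.fromiter pattern used in read_particle_header above: whitespace-separated integers from several lines stream straight into a fixed-shape (num_grids, 3) array with no intermediate list of lists:

    import numpy as np

    lines = ["0 128 0", "1024 64 1", "1536 64 1"]   # invented DM header rows
    num_grids = len(lines)
    grid_info = np.fromiter((int(i) for line in lines for i in line.split()),
                            dtype='int64',
                            count=3 * num_grids).reshape((num_grids, 3))
    assert grid_info.shape == (3, 3)
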
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


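A minimal sketch of the seek arithmetic in the Nyx reader above: within one FAB record the fields sit back to back, so field k begins nElements * bytesPerReal * k bytes past the record's data offset (all values below are invented):

    nElements = 16 * 16 * 16      # cells in the grid
    bytesPerReal = 8              # double precision
    field_index = 2               # third field in the plotfile
    offset_within_fab = int(nElements * bytesPerReal * field_index)
    assert offset_within_fab == 65536
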
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -107,7 +107,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -139,7 +139,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +160,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +211,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +302,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +347,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +368,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +399,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +413,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +551,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):


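The paired np.choose calls in the Orion particle reader above implement, in place, a plain containment test: a grid's mask survives as 1 only if left_edge <= coord < right_edge on every axis. A readable equivalent, with invented edges:

    import numpy as np

    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = np.array([0.25, 0.25, 0.25])
    inside = ((grid_left_edge <= coord) &
              (coord < grid_right_edge)).all(axis=1)
    ind = np.where(inside)   # (array([0]),) -- only the first grid holds it
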
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 
@@ -79,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -93,10 +93,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -116,7 +116,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.tree_proxy = pf.ramses_tree
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -153,12 +153,12 @@
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         level_info = self.tree_proxy.count_zones()
         num_ogrids = sum(level_info)
-        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        ogrid_hilbert_indices = na.zeros(num_ogrids, dtype='uint64')
-        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        ogrid_left_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = np.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = np.zeros((num_ogrids,6), dtype='int64')
+        ogrid_hilbert_indices = np.zeros(num_ogrids, dtype='uint64')
+        ochild_masks = np.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
             self.pf.domain_dimensions,
             ogrid_left_edge, ogrid_right_edge,
@@ -180,7 +180,7 @@
             if level_info[level] == 0: continue
             # Get the indices of grids on this level
             ggi = (ogrid_levels == level).ravel()
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2 
+            dims = np.ones((ggi.sum(), 3), dtype='int64') * 2 
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             fl = ogrid_file_locations[ggi,:]
@@ -189,7 +189,7 @@
             # We want grids that cover no more than MAX_EDGE cells in every direction
             psgs = []
             # left_index is integers of the index, with respect to this level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
+            left_index = np.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
@@ -203,18 +203,18 @@
             #print level, hilbert_indices.min(), hilbert_indices.max()
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
             for ddleft_index, ddfl in zip(lefts, locs):
-                for idomain in na.unique(ddfl[:,0]):
+                for idomain in np.unique(ddfl[:,0]):
                     dom_ind = ddfl[:,0] == idomain
                     dleft_index = ddleft_index[dom_ind,:]
                     dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                     psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                     dleft_index, dfl)
                     if psg.efficiency <= 0: continue
@@ -226,12 +226,12 @@
             pbar.finish()
             self.proto_grids.append(psgs)
             print sum(len(psg.grid_file_locations) for psg in psgs)
-            sums = na.zeros(3, dtype='int64')
+            sums = np.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             #for g in self.proto_grids[level]:
             #    sums += [s.sum() for s in g.sigs]
-            #assert(na.all(sums == dims.prod(axis=1).sum()))
+            #assert(np.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     def _parse_hierarchy(self):
@@ -251,11 +251,11 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.proto_grids = []
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[gi,:],
@@ -346,10 +346,10 @@
         rheader = self.ramses_tree.get_file_info()
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_right_edge = na.ones(3, dtype='float64') \
+        self.domain_right_edge = np.ones(3, dtype='float64') \
                                            * rheader['boxlen']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * 2
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")


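A minimal sketch (indices invented) of the proto-subgrid bounds computed in the RAMSES re-gridding loop above: octs are two cells wide, hence the "+2" noted in the code, so the enclosing region runs from the minimum left index to the maximum left index plus two:

    import numpy as np

    dleft_index = np.array([[4, 4, 4], [6, 4, 4], [4, 6, 6]], dtype='int64')
    initial_left = np.min(dleft_index, axis=0)                 # [4 4 4]
    idims = (np.max(dleft_index, axis=0) - initial_left) + 2   # [4 4 4]
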
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,8 +38,8 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
 
     def _read_data_set(self, grid, field):
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -180,7 +181,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -191,7 +192,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -199,7 +200,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -208,7 +209,7 @@
 
     def _initialize_grid_arrays(self):
         AMRHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def save_data(self, *args, **kwargs):
         pass
@@ -224,7 +225,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -296,8 +297,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +314,66 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain, in units of sim_unit_to_cm
+    nprocs : integer, optional
+        If greater than 1, the data will be decomposed into this many subarrays
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = np.array(domain_dimensions)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(np.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3)
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +387,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf


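A usage sketch of the reworked load_uniform_grid, mirroring its new docstring (the import path assumes the function stays where this diff defines it, in yt.frontends.stream.data_structures): sim_unit_to_cm replaces domain_size_in_cm, bbox sets a non-unit domain in simulation units, and nprocs > 1 decomposes the array into subgrids:

    import numpy as np
    from yt.frontends.stream.data_structures import load_uniform_grid

    arr = np.random.random((128, 128, 129))  # variably sized domains now work
    data = dict(Density=arr)
    bbox = np.array([[0.0, 1.0], [-1.5, 1.5], [1.0, 2.5]])
    pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
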
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1


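A minimal sketch of the np.mgrid idiom in the Tiger hierarchy above: a complex step n*1j asks mgrid for n evenly spaced samples with both endpoints included, yielding one coordinate per root grid along each axis:

    import numpy as np

    ngdims = [2, 2, 2]   # invented root-grid counts
    glx, gly, glz = np.mgrid[0.0:0.5:ngdims[0]*1j,
                             0.0:0.5:ngdims[1]*1j,
                             0.0:0.5:ngdims[2]*1j]
    assert glx.shape == (2, 2, 2) and glx.max() == 0.5
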
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")


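A minimal sketch of the slice trick in _read_data_slice above: a plane is read as a degenerate 3D section by shifting the left corner to the slice coordinate and collapsing the section size to one cell along that axis:

    import numpy as np

    LD = np.array([0, 0, 0], dtype='int64')      # section left corner
    SS = np.array([16, 16, 16], dtype='int64')   # section size
    axis, coord = 2, 5
    LD[axis] += coord
    SS[axis] = 1
    # au.read_tiger_section(fn, LD, SS, RS) would now return a 16x16x1 block.
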
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)


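A minimal sketch of the binary-payload branch above: ndarrays are shipped as raw bytes via tostring() (the numpy-1.x name; later releases call the same operation tobytes()):

    import numpy as np

    bdata = np.arange(4, dtype='float64')
    raw = bdata.tostring() if isinstance(bdata, np.ndarray) else bdata
    assert len(raw) == 4 * 8   # four float64 values, eight bytes apiece
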
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)


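A minimal sketch of the take_log branch above: when a field is colormapped in log space, the data and its color bounds must be transformed together or the normalization silently shifts. The explicit normalization shown here is illustrative, not apply_colormap's internals:

    import numpy as np

    field_vals = np.array([1.0e-3, 1.0e-1, 1.0e1])
    cmi, cma = field_vals.min(), field_vals.max()
    cmi, cma = np.log10(cmi), np.log10(cma)
    normed = (np.log10(field_vals) - cmi) / (cma - cmi)   # spans [0, 1]
    assert normed.min() == 0.0 and normed.max() == 1.0
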
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrices
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     


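A sketch of the camera-path resampling above, with np.interp standing in for yt's create_spline (an assumption for brevity -- the real helper is a spline, not piecewise-linear): each component of the keyframe centers is resampled independently onto N evenly spaced parameter values:

    import numpy as np

    times = np.linspace(0.0, 1.0, 4)       # four keyframes
    centers = np.random.random((4, 3))     # invented camera centers
    N = 16
    t_new = np.linspace(0.0, 1.0, N)
    pos = np.empty((N, 3), dtype="float64")
    for i in range(3):
        pos[:, i] = np.interp(t_new, times, centers[:, i])
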
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \


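The dual import above is the pivot for the na -> np rename running through this whole changeset: both aliases bind the same module object, so existing scripts written against "na." keep working while new code standardizes on "np.":

    import numpy as na   # historical alias, kept for old scripts
    import numpy as np   # the alias new code standardizes on
    assert na is np      # one module, two names
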
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is omitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total number of cells in the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,12 +994,12 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
         pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+                np.prod(self.domain_right_edge-self.domain_left_edge))
 
         while current_node is not None:
             pbar.update(volume_partitioned)
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])


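Two conventions in amr_kdtree.py are easy to miss. Node ids form an implicit binary tree (children of id are 2*id+1 and 2*id+2, parent is (id-1)>>1), and reduce_tree_images finds each rank's leaf by walking the bits of np.binary_repr(anprocs + rank). A self-contained sketch of that walk, assuming a complete tree of depth rounds:

    import numpy as np

    def _lchild_id(id): return (id << 1) + 1  # same helpers as in the file
    def _rchild_id(id): return (id << 1) + 2

    rounds = 3                # illustrative; log2 of the communicator size
    anprocs = 2 ** rounds
    rank = 5
    node = 0
    # After the leading '1', a '0' bit descends left and a '1' bit right.
    for bit in np.binary_repr(anprocs + rank)[1:]:
        node = _lchild_id(node) if bit == '0' else _rchild_id(node)
    assert node == anprocs - 1 + rank  # leaf id in a complete binary tree
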
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 


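The test above draws two seeded uniform points and scales them into the domain, so the ray is random but reproducible across answer-test runs. A standalone sketch with a hypothetical unit cube standing in for the domain edges:

    import numpy as np

    np.random.seed(4333)                   # same seed as the test
    left, right = np.zeros(3), np.ones(3)  # hypothetical domain edges
    start_point = np.random.random(3) * (right - left) + left
    end_point = np.random.random(3) * (right - left) + left
    assert np.all((start_point >= left) & (start_point <= right))
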
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -55,10 +55,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +122,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +134,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True


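The array check above scores a pair of arrays by the symmetric relative difference |a1 - a2| / (a1 + a2) and counts how many entries exceed the tolerance. A tiny numeric sketch with illustrative values:

    import numpy as np

    a1 = np.array([1.0, 2.0, 3.0])
    a2 = np.array([1.0, 2.002, 3.3])
    acceptable = 1e-2

    delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
    nabove = len(np.where(delta > acceptable)[0])
    # Only the last entry (relative delta ~0.048) exceeds the tolerance.
    assert nabove == 1 and np.nanmax(delta) > acceptable
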
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


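The particle test gathers per-axis extrema into two length-3 arrays. A sketch against hypothetical stand-in data; renaming min/max here also avoids shadowing the Python builtins, which the original does:

    import numpy as np

    all_data = {  # stands in for pf.h.all_data()
        "particle_position_x": np.random.random(100),
        "particle_position_y": np.random.random(100),
        "particle_position_z": np.random.random(100),
    }
    dims = ["particle_position_x", "particle_position_y",
            "particle_position_z"]

    mins = np.empty(3, dtype='float64')
    maxs = mins.copy()
    for i in range(3):
        mins[i] = np.min(all_data[dims[i]])
        maxs[i] = np.max(all_data[dims[i]])
    assert np.all(mins <= maxs)
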
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,
@@ -1212,7 +1212,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1266,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1302,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) * 
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      asin(np.fabs(self.OmegaCurvatureNow) * 
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0..
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(self.OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   


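The open-universe branch of ComputeRedshiftFromTime solves the Peebles relation sinh(eta) - eta = x by seeding eta with two approximation steps and then iterating eta = arcsinh(eta + x), which is a contraction and so converges quickly. A standalone sketch with a hypothetical x:

    import numpy as np

    x = 0.8  # hypothetical value of 2*TimeHubble0*(1-Om)**1.5/Om
    eta = np.power(6 * x, 1.0 / 3.0)                       # part 1: seed
    eta = np.power(120 * x / (20 + eta * eta), 1.0 / 3.0)  # part 2: refine
    for _ in range(40):                                    # part 3: iterate
        eta_old = eta
        eta = np.arcsinh(eta + x)
        if np.fabs(eta - eta_old) < 1.0e-10:
            break
    # eta now satisfies the original equation to high accuracy.
    assert abs(np.sinh(eta) - eta - x) < 1.0e-8
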
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,156 @@
+"""
+Automagical cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lot's of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    quality = load_balance / (1 + 0.25 * (bsize / ideal_bsize - 1.0))
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consiting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape


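Two of the new decompose.py helpers are pure functions that are easy to exercise in isolation; they are copied here verbatim except for floor division, so the demo also runs on Python 3:

    SIEVE_PRIMES = \
        lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])

    def decompose_to_primes(max_prime):
        for prime in SIEVE_PRIMES(list(range(2, max_prime))):
            if prime * prime > max_prime:
                break
            while max_prime % prime == 0:
                yield prime
                max_prime //= prime
        if max_prime > 1:
            yield max_prime

    assert SIEVE_PRIMES(list(range(2, 12))) == [2, 3, 5, 7, 11]
    assert list(decompose_to_primes(12)) == [2, 2, 3]  # 12 = 2 * 2 * 3
    # get_psize spreads these factors over (px, py, pz) candidates and
    # keeps the split that evaluate_domain_decomposition scores highest.
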
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] is 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] is 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here


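The converter's inner read is plain numpy: big-endian float32 straight off the file, then a Fortran-order reshape so the x index varies fastest, matching the legacy VTK layout. A round-trip sketch with made-up dimensions and a temporary file:

    import os
    import tempfile
    import numpy as np

    dims = (4, 3, 2)                             # hypothetical grid dims
    src = np.arange(np.prod(dims), dtype='>f4')  # big-endian, as on disk
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write(src.tobytes())

    with open(path, 'rb') as f:
        data = np.fromfile(f, dtype='>f4',
                           count=int(np.prod(dims))).reshape(dims, order='F')
    os.remove(path)

    assert data.shape == dims
    assert data[1, 0, 0] == 1.0  # adjacent x-cells are adjacent on disk
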
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -83,11 +83,11 @@
     g.attrs["unique_identifier"] = pf.unique_identifier
     g.attrs["cosmological_simulation"] = pf.cosmological_simulation
     # @todo: Where is this in the yt API?
-    #g.attrs["num_ghost_zones"] = pf...
+    g.attrs["num_ghost_zones"] = 0
     # @todo: Where is this in the yt API?
-    #g.attrs["field_ordering"] = pf...
+    g.attrs["field_ordering"] = 0
     # @todo: not yet supported by yt.
-    #g.attrs["boundary_conditions"] = pf...
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
 
     if pf.cosmological_simulation:
         g.attrs["current_redshift"] = pf.current_redshift
@@ -136,10 +136,12 @@
     # root datasets -- info about the grids
     ###
     f["grid_dimensions"] = pf.h.grid_dimensions
-    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
     f["grid_level"] = pf.h.grid_levels
-    # @todo: Do we need to loop over the grids for this?
-    f["grid_parent_id"] = -1
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.h.grid_particle_count
 
     ###


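On the writer.py hunk above: assigning a bare -1 to an h5py file key would create a scalar dataset, so grid_parent_id is now written as one -1 per grid until proper parent ids are filled in. A minimal sketch of the shape difference, with a hypothetical file name and grid count:

    import numpy as np
    import h5py

    n_grids = 4  # hypothetical
    with h5py.File("gdf_sketch.h5", "w") as f:
        f["grid_parent_id"] = -np.ones(n_grids)  # one entry per grid
        assert f["grid_parent_id"].shape == (n_grids,)
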
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,23 +35,23 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
@@ -61,28 +61,28 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
@@ -94,9 +94,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,23 +104,23 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
@@ -135,11 +135,11 @@
         xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
         ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
         zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
+        if np.any(np.isnan(self.table)):
             raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
+        if np.any(np.isnan(x) | np.isnan(y) | np.isnan(z)):
             raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
+        if np.any(np.isnan(xm) | np.isnan(ym) | np.isnan(zm)):
             raise ValueError
         my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
         my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)

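As a usage sketch of the renamed interpolators (class name as in yt's linear_interpolators module; the constructor signature is taken from the hunk above, and the data object can be anything dict-like):

import numpy as np
from yt.utilities.linear_interpolators import UnilinearFieldInterpolator

# f(x) = x**2 sampled on [0, 1]; lookups outside the bins raise
# ValueError unless truncate=True is passed.
table = np.linspace(0.0, 1.0, 64)**2
interp = UnilinearFieldInterpolator(table, (0.0, 1.0), "x")
data = {"x": np.array([0.1, 0.5, 0.9])}
print(interp(data))   # ~ [0.01, 0.25, 0.81]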

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: raise RuntimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] !=3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = P.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unf., I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356,  0.        ,  0.        ])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unf., I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,20 +656,20 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     

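The closing hunk here is the axis-angle (Rodrigues) rotation matrix; as a standalone check of the formula (rot_vector assumed to be unit length, as in the source):

import numpy as np

def rodrigues(theta, u):
    # Same matrix R that get_rotation_matrix builds above.
    ux, uy, uz = u
    c, s = np.cos(theta), np.sin(theta)
    return np.array(
        [[c + ux*ux*(1-c),      ux*uy*(1-c) - uz*s,  ux*uz*(1-c) + uy*s],
         [uy*ux*(1-c) + uz*s,   c + uy*uy*(1-c),     uy*uz*(1-c) - ux*s],
         [uz*ux*(1-c) - uy*s,   uz*uy*(1-c) + ux*s,  c + uz*uz*(1-c)]])

# Rotating x-hat by 90 degrees about z-hat yields y-hat:
print(np.dot(rodrigues(np.pi / 2, [0., 0., 1.]), [1., 0., 0.]))  # ~ [0, 1, 0]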

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
@@ -97,10 +97,10 @@
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +129,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)

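The metadata loop in this hunk exists so the payload survives JSON encoding; reduced to a standalone sketch (np.asscalar was the current API at the time; modern numpy spells it .item()):

import numpy as np

metadata = {"center": np.array([0.5, 0.5, 0.5]), "current_time": np.float64(1.25)}
for k in metadata:
    if isinstance(metadata[k], np.ndarray):
        metadata[k] = metadata[k].tolist()      # arrays -> plain lists
    elif hasattr(metadata[k], 'dtype'):
        metadata[k] = np.asscalar(metadata[k])  # numpy scalars -> Python scalars
# metadata is now JSON-serializable: {'center': [0.5, 0.5, 0.5], 'current_time': 1.25}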

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,31 +52,31 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.

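Beyond the rename, note this hunk now stores the caller's north_vector before normalization rather than the normalized copy. The default basis construction it keeps reads, standalone:

import numpy as np

normal = np.array([0.3, 0.4, 0.5])
normal /= np.sqrt(np.dot(normal, normal))
# Pick the coordinate axis most orthogonal to the normal, then build
# east and north by successive cross products.
vecs = np.identity(3)
ax = np.cross(normal, vecs).sum(axis=1).argmax()
east = np.cross(vecs[ax, :], normal).ravel()
north = np.cross(normal, east).ravel()
east /= np.sqrt(np.dot(east, east))
north /= np.sqrt(np.dot(north, north))
# (east, north, normal) is now a right-handed orthonormal triad.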

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -73,7 +73,7 @@
             for g in self.grids:
                 for f in fields:
                     if f not in self.queue[g.id]:
-                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
                         self.queue[g.id][f] = d
                 for f in pfields:
                     self.queue[g.id][f] = self._read(g, f)
@@ -87,12 +87,12 @@
         fi = self.pf.field_info[f]
         if fi.particle_type and g.NumberOfParticles == 0:
             # because this gets upcast to float
-            return na.array([],dtype='float64')
+            return np.array([],dtype='float64')
         try:
             temp = self.pf.h.io._read_data_set(g, f)
         except:# self.pf.hierarchy.io._read_exception as exc:
             if fi.not_in_all:
-                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
             else:
                 raise
         return temp
@@ -137,9 +137,9 @@
         msg = dict(grid_id = grid.id, field = field, op="read")
         mylog.debug("Requesting %s for %s from %s", field, grid, dest)
         if self.pf.field_info[field].particle_type:
-            data = na.empty(grid.NumberOfParticles, 'float64')
+            data = np.empty(grid.NumberOfParticles, 'float64')
         else:
-            data = na.empty(grid.ActiveDimensions, 'float64')
+            data = np.empty(grid.ActiveDimensions, 'float64')
         hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
         self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
         mylog.debug("Waiting for data.")

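The Irecv-before-send ordering in this last hunk is the point: the receive buffer is posted before the request message goes out, so the reply can never arrive unmatched. A two-rank mpi4py sketch of the same pattern (run under mpirun -np 2; the payload size is assumed known to both sides):

from mpi4py import MPI
import numpy as np

YT_TAG_MESSAGE = 317
comm = MPI.COMM_WORLD
if comm.rank == 0:
    data = np.empty(16, dtype='float64')
    hook = comm.Irecv([data, MPI.DOUBLE], source=1)   # post receive first
    comm.send(dict(op="read"), dest=1, tag=YT_TAG_MESSAGE)
    hook.Wait()                                       # data is now filled
elif comm.rank == 1:
    msg = comm.recv(source=0, tag=YT_TAG_MESSAGE)
    comm.Send([np.arange(16, dtype='float64'), MPI.DOUBLE], dest=0)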

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -421,14 +421,14 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-    obj_ids = na.arange(len(objects))
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
@@ -525,14 +525,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
            datatype = "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
            datatype = "array"
         elif isinstance(data, types.ListType):
            datatype = "list"
@@ -549,14 +549,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -581,16 +581,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -608,7 +608,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -616,7 +616,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -636,7 +636,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -743,7 +743,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -751,11 +751,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -775,8 +775,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -802,9 +802,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -816,7 +816,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -830,7 +830,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -841,11 +841,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -867,7 +867,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -926,13 +926,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -943,7 +943,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -973,13 +973,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1000,13 +1000,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \

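The work-splitting logic above reduces to two numpy idioms, sketched standalone:

import numpy as np

n_objs, njobs, offset = 10, 3, 1
# Contiguous blocks, one per job:
blocks = np.array_split(np.arange(n_objs), njobs)
# -> [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
# Round-robin assignment for this job instead:
mine = np.arange(n_objs)[offset::njobs]   # -> array([1, 4, 7])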

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
     raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
     raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i

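Same split as in parallel_analysis_interface, except rank 0 is held back as the dedicated queue server; standalone:

import numpy as np

my_size, njobs = 7, 3
all_new_comms = np.array_split(np.arange(1, my_size), njobs)
all_new_comms.insert(0, np.array([0]))
# -> [array([0]), array([1, 2]), array([3, 4]), array([5, 6])]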

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]

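The cdict construction used throughout this file, sketched with a made-up ramp (list() around zip keeps it valid on Python 3 as well):

import numpy as np
import matplotlib.colors as mcolors

_vs = np.linspace(0, 1, 255)
red, grn, blu = _vs, _vs**0.5, 1.0 - _vs
cdict = {'red':   list(zip(_vs, red, red)),
         'green': list(zip(_vs, grn, grn)),
         'blue':  list(zip(_vs, blu, blu))}
cmap = mcolors.LinearSegmentedColormap('example_ramp', cdict, 256)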

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata == None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata == None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,7 +29,7 @@
     y_dict, \
     axis_names
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -352,7 +352,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -374,7 +374,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],

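The argsort-descending on 'dx' guarantees coarse cells are deposited before fine ones, so refined data overwrites its parent; for example:

import numpy as np

dx = np.array([0.25, 0.0625, 0.125, 0.5])
order = np.argsort(dx)[::-1]   # -> array([3, 0, 2, 1]): coarsest cell first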

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer
@@ -163,7 +163,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -283,7 +283,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -297,13 +297,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()

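The normalization in TransportAppender has to dodge the -inf that log10 produces for empty pixels; reduced to a standalone sketch:

import numpy as np

val = np.array([[1.0e-3, 1.0e2], [1.0e5, 0.0]])
image = np.log10(val)                       # the 0.0 pixel becomes -inf
mi = np.nanmin(image[~np.isinf(image)])     # -3.0
ma = np.nanmax(image[~np.isinf(image)])     # 5.0
image = (image - mi) / (ma - mi)            # finite pixels now span [0, 1]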

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, mi=0, ma=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,22 +97,22 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
@@ -141,16 +141,16 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
         for channel in range(bitmap_array.shape[2]):
             bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
@@ -229,14 +229,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +279,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +295,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",

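scale_image's clip-and-cast, reduced to its core (out-of-range values saturate rather than wrap):

import numpy as np

def to_uint8(image, mi, ma):
    # Map [mi, ma] linearly onto 0..255, clipping outside the range.
    return np.clip((image - mi) / (ma - mi) * 255, 0, 255).astype('uint8')

print(to_uint8(np.array([-1.0, 0.0, 0.5, 1.0, 2.0]), 0.0, 1.0))
# -> [  0   0 127 255 255]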

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1884,7 +1884,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +52,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -195,10 +195,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -250,12 +250,12 @@
         #appropriate shift to the copied field.
 
         #set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +269,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -324,9 +324,9 @@
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
@@ -339,7 +339,7 @@
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
             if visible.nonzero()[0].size == 0: continue
-            verts = na.array(
+            verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
@@ -352,8 +352,8 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                active_ids = na.unique(plot.data['GridIndices'])
-                for i in na.where(visible_ids)[0]:
+                active_ids = np.unique(plot.data['GridIndices'])
+                for i in np.where(visible_ids)[0]:
                     plot._axes.text(
                         left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
                         left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
@@ -418,18 +418,18 @@
                              plot.data[self.field_y],
                              int(nx), int(ny),
                            (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
                       self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
         lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        mag = np.sqrt(pixX**2 + pixY**2)
+        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
         dt = 1.0 / (self.nsample-1)
         for i in range(1,self.nsample):
             xt = lines[i-1,0,:,:]
             yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
+            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
         for i in range(self.data_size[0]):
@@ -517,18 +517,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -635,7 +635,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -650,8 +650,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
+        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -687,7 +687,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -701,7 +701,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -845,7 +845,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1093,8 +1093,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)
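
The hunks above belong to the sweeping rename of the numpy import alias
from "na" to "np" that dominates this changeset.  A minimal sketch of the
mechanical rewrite it implies, assuming only the standard library (the
helper name is hypothetical, not part of yt); a regex pass like this
still deserves a manual review afterwards, since it cannot distinguish
strings or comments from code:

    import re
    import sys

    def migrate_numpy_alias(source):
        # Rewrite the import line first, then every attribute access on
        # the old alias.  The \b keeps identifiers that merely end in
        # "na" (e.g. "luna.") untouched.
        source = re.sub(r'^import numpy as na\b', 'import numpy as np',
                        source, flags=re.MULTILINE)
        return re.sub(r'\bna\.', 'np.', source)

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            with open(path) as f:
                text = f.read()
            with open(path, 'w') as f:
                f.write(migrate_numpy_alias(text))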


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
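
For context, the set_zlim logic being renamed above resolves the
'min'/'max' sentinels against the current image buffer and, when a dex
is given, clamps the colorbar range to that many decades.  Restated as a
standalone sketch (function name hypothetical):

    import numpy as np

    def resolve_zlim(imbuff, zmin='min', zmax='max', dex=None):
        if zmin == 'min':
            # smallest non-zero pixel value in the buffer
            zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
            if dex is not None:
                zmax = min(zmin * 10**dex, np.nanmax(imbuff))
        if zmax == 'max':
            zmax = np.nanmax(imbuff)
            if dex is not None:
                zmin = max(zmax / 10**dex, np.nanmin(imbuff))
        return zmin, zmax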


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -33,7 +33,7 @@
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
@@ -122,7 +122,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -164,7 +164,7 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -174,11 +174,11 @@
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
     # Transforming to the cutting plane coordinate system
-    center = na.array(center)
+    center = np.array(center)
     center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
     (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
+    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+    center = np.dot(mat,center)
     width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self,name=None,mpl_kwargs={}):
         """saves the plot to disk.
 
         Parameters
@@ -817,6 +817,10 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
         if name == None:
@@ -841,7 +845,7 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n,mpl_kwargs))
         return names
 
     def _send_zmq(self):
@@ -1119,7 +1123,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1163,12 +1167,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1187,8 +1191,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1212,7 +1216,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
@@ -1255,14 +1259,23 @@
     def __init__(self, field, size):
         self._plot_valid = True
         fsize, axrect, caxrect = self._get_best_layout(size)
-        # Hardcoding the axis dimensions for now
         
-        self.figure = matplotlib.figure.Figure(figsize = fsize, 
-                                               frameon = True)
-        self.axes = self.figure.add_axes(axrect)
-        self.cax = self.figure.add_axes(caxrect)
-
-    def save(self, name, canvas = None):
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good, '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs, canvas = None):
         if name[-4:] == '.png':
             suffix = ''
         else:
@@ -1279,7 +1292,7 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn)
+        canvas.print_figure(fn,**mpl_kwargs)
         return fn
 
     def _get_best_layout(self, size):
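
Beyond the alias rename, the substantive change in this file is the new
mpl_kwargs pass-through: save() now forwards a dict of keyword arguments
all the way to matplotlib's canvas.print_figure().  (Using a mutable {}
as the default argument is the classic Python pitfall, though harmless
here as long as the dict is never mutated.)  A hypothetical usage,
following the docstring's own example; the dataset name and the
SlicePlot import are assumptions:

    from yt.mods import load, SlicePlot

    pf = load('DD1701')
    slc = SlicePlot(pf, 'x', 'Density')
    # bbox_inches and dpi are forwarded untouched to print_figure()
    slc.save('my_slice', mpl_kwargs={'bbox_inches': 'tight', 'dpi': 300})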


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.logspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.logspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -338,11 +338,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -369,7 +369,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -378,7 +378,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)
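
One thing a careful reader may notice in the hunk above: the rename
preserves np.logspace in the two *linear* branches as well, where
np.linspace is presumably what was intended.  The presumed intent, as a
standalone sketch:

    import numpy as np

    def axis_bins(lo, hi, nbins, scale):
        # log axes get logarithmically spaced bin edges; linear axes get
        # evenly spaced ones (the hunk above uses logspace for both)
        if scale == 'log':
            return np.logspace(np.log10(lo), np.log10(hi), nbins + 1)
        return np.linspace(lo, hi, nbins + 1)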


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_passthrough
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp
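
The mask in clean_streamlines() above drops the all-zero rows that pad
each streamline out to the fixed step count.  In isolation:

    import numpy as np

    stream = np.array([[0.1, 0.2, 0.3],
                       [0.4, 0.5, 0.6],
                       [0.0, 0.0, 0.0]])   # unused, zero-padded step
    mask = np.all(stream != 0.0, axis=1)
    print(stream[mask])                    # only the integrated steps remain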


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs == 1) and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs
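
The core of the LogLocator above, restated as a runnable sketch: ticks
land on integer powers of the base spanning the view limits, thinned by
a stride when there are too many decades (function name hypothetical):

    import math
    import numpy as np

    def decade_ticks(vmin, vmax, base=10.0, stride=1):
        vmin = math.log(vmin) / math.log(base)
        vmax = math.log(vmax) / math.log(base)
        decades = np.arange(math.floor(vmin), math.ceil(vmax) + stride,
                            stride)
        return base ** decades

    print(decade_ticks(3e-2, 4e4))   # 1e-2, 1e-1, ..., 1e5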


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')
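
The transfer function built near the top of this script maps decades of
log10(density) to RGBA gaussian bumps.  Restated in isolation with the
same values; the import from yt.mods mirrors the script's own star
import, and 1.67e-24 is presumably the proton mass in grams:

    import numpy as np
    from yt.mods import ColorTransferFunction

    mh = np.log10(1.67e-24)   # shift number-density decades to g/cm^3
    tf = ColorTransferFunction((7.5 + mh, 14.0 + mh))
    tf.add_gaussian(8.25 + mh, 0.002, [0.2, 0.2, 0.4, 0.1])  # center, width, RGBA
    tf.add_gaussian(9.75 + mh, 0.002, [0.0, 0.0, 0.3, 0.1])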


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]
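
The final hunk records each filter's characteristic wavelength as the
wavelength of peak transmission.  On a toy filter:

    import numpy as np

    wavelen = np.array([3600., 3650., 3700., 3750., 3800.])
    trans = np.array([0.0, 0.4, 1.0, 0.6, 0.1])
    print(wavelen[np.argmax(trans)])   # -> 3700.0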


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -167,12 +167,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -226,10 +226,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -289,23 +289,23 @@
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
     def get_sampler(self, args):
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
@@ -326,13 +326,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -510,30 +510,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +559,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +568,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +583,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
         north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +613,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -676,12 +676,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,14 +693,14 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -708,7 +708,7 @@
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -726,7 +726,7 @@
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
@@ -747,20 +747,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +771,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -823,14 +823,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +852,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +880,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +896,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -935,10 +935,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -957,7 +957,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -968,13 +968,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -988,13 +988,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1088,7 +1088,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1106,7 +1106,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1164,18 +1164,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1195,17 +1195,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1228,9 +1228,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1243,7 +1243,7 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
@@ -1269,7 +1269,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1312,7 +1312,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1322,9 +1322,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1349,7 +1349,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1381,10 +1381,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1426,7 +1426,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1445,7 +1445,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1457,8 +1457,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1466,14 +1466,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1502,15 +1502,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1568,12 +1568,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1607,8 +1607,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1630,7 +1630,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1722,7 +1722,7 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z != None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times == None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time == None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],

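For reference, a minimal standalone sketch of the simulated-annealing
acceptance rule used by the shortest-path search above (the function and
variable names here are illustrative, not part of the yt API; the sign
convention in the method above depends on how prev/next are defined):

    import numpy as np

    def accept_probability(prev_length, next_length, temperature):
        # Always accept a tour that is no worse; otherwise accept with a
        # probability that decays as the annealing temperature drops.
        if next_length <= prev_length:
            return 1.0
        return np.exp(-abs(next_length - prev_length) / temperature)

    accept_probability(10.0, 11.0, 5.0)   # ~0.82 while still "hot"
    accept_probability(10.0, 11.0, 0.1)   # ~4.5e-5 once "cold"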

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])

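A short usage sketch for create_spline (the data values are made up for
illustration; tension controls the cardinal-spline stiffness, and new_x
values outside [old_x[0], old_x[-1]] are clamped to the endpoints, as the
floor/ceiling lines above show):

    import numpy as np
    from yt.visualization.volume_rendering.create_spline import create_spline

    old_x = np.linspace(0.0, 1.0, 10)    # keyframe times
    old_y = np.random.random(10)         # keyframe values
    new_x = np.linspace(0.0, 1.0, 100)   # densely resampled times
    new_y = create_spline(old_x, old_y, new_x, tension=0.5)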

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')

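A usage sketch for the HDF5 round-trip above, assuming `bricks` is a list
of single-field PartitionedGrid objects (the file name is illustrative):

    from yt.visualization.volume_rendering.grid_partitioner import \
        export_partitioned_grids, import_partitioned_grids

    # bricks: list of single-field PartitionedGrid objects (assumed)
    export_partitioned_grids(bricks, "pgrids.h5")
    bricks2 = import_partitioned_grids("pgrids.h5")  # object ndarray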

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal

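A compact sketch tying the pieces above together, assuming a log10
Density field spanning (-28, -24); the layer count and widths are
arbitrary:

    import numpy as np
    from yt.visualization.volume_rendering.transfer_functions import \
        ColorTransferFunction

    tf = ColorTransferFunction((-28.0, -24.0))
    # Five Gaussian layers, log-spaced in opacity; add_gaussian folds each
    # one in via np.maximum, so layers never darken one another.
    tf.add_layers(5, w=0.01, alpha=np.logspace(-2, 0, 5), colormap='RdBu_r')
    tf.plot("tf_preview.png")   # save an image of the transfer function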


https://bitbucket.org/yt_analysis/yt-3.0/changeset/d2daf9466108/
changeset:   d2daf9466108
branch:      yt
user:        ngoldbaum
date:        2012-09-20 20:06:20
summary:     Don't need to modify grid_patch.py after all; so long as the grid edges are set correctly in the frontend, everything should work.
affected #:  1 file

diff -r 28ca834d247d54c73293472ed55c010f0a7fbe05 -r d2daf94661086662a9efe6139fe88557b71bbff3 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,8 +210,6 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
-        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0afe4bd46a1a/
changeset:   0afe4bd46a1a
branch:      yt
user:        MatthewTurk
date:        2012-09-20 20:15:49
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #274)
affected #:  3 files



diff -r b30f700ac72bcaa6d1e6145499d5e8a13c747e06 -r 0afe4bd46a1acc7653831364d4f541eadb45d530 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -148,8 +148,8 @@
 
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)

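The change above snaps grid edges onto the lattice of their level's cell
width, but now only along the first ND (dimensionality) axes, so the
dummy axes of 1D/2D FLASH data are left alone. The snapping idea itself,
with made-up values:

    import numpy as np

    dx = np.array([0.25, 0.25])             # cell width on this level
    left_edge = np.array([0.2501, 0.4999])  # slightly off-lattice edges
    np.rint(left_edge / dx) * dx            # -> array([ 0.25,  0.5 ])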




https://bitbucket.org/yt_analysis/yt-3.0/changeset/664d83ff5ace/
changeset:   664d83ff5ace
branch:      yt
user:        xarthisius
date:        2012-09-20 20:18:15
summary:     [gdf] calculate siblings and child_mask properly. Thanks to Matt for all the work.
affected #:  2 files

diff -r 0afe4bd46a1acc7653831364d4f541eadb45d530 -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -364,8 +364,10 @@
         self._child_index_mask = None
 
     #@time_execution
-    def __fill_child_mask(self, child, mask, tofill):
+    def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
         rf = self.pf.refine_by
+        if dlevel != 1:
+            rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = np.maximum(0, cgi / rf - gi)
         endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
@@ -386,7 +388,7 @@
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
             for sibling in self.OverlappingSiblings:
-                self.__fill_child_mask(sibling, self._child_mask, 0)
+                self.__fill_child_mask(sibling, self._child_mask, 0, 0)
         
         self._child_indices = (self._child_mask==0) # bool, possibly redundant
 

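With the new dlevel argument the effective refinement factor becomes
rf**dlevel, so same-level overlapping siblings (dlevel=0) are masked with
a factor of 1 and their indices map one-to-one. A sketch of the index
arithmetic, with made-up start indices:

    import numpy as np

    rf = 2 ** 0                   # dlevel=0: same level, no rescaling
    gi = np.array([10, 10, 10])   # this grid's global start index
    cgi = np.array([14, 12, 10])  # the sibling's global start index
    np.maximum(0, cgi / rf - gi)  # -> array([4, 2, 0])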

diff -r 0afe4bd46a1acc7653831364d4f541eadb45d530 -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -37,6 +37,8 @@
            AMRHierarchy
 from yt.data_objects.static_output import \
            StaticOutput
+from yt.utilities.lib import \
+    get_box_grids_level
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -133,14 +135,25 @@
         del levels, glis, gdims
 
     def _populate_grid_objects(self):
-        for g in self.grids:
+        mask = np.empty(self.grids.size, dtype='int32')
+        for gi, g in enumerate(self.grids):
             g._prepare_grid()
             g._setup_dx()
 
-        for g in self.grids:
+        for gi, g in enumerate(self.grids):
             g.Children = self._get_grid_children(g)
             for g1 in g.Children:
                 g1.Parent.append(g)
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                self.grid_levels[gi],
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            m = mask.astype("bool")
+            m[gi] = False
+            siblings = self.grids[gi:][m[gi:]]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):

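The sibling search above asks get_box_grids_level for every grid whose
box overlaps this one on the same level, removes the grid itself, and
keeps only later-indexed grids so each overlapping pair is recorded once.
The mask bookkeeping in isolation (values illustrative):

    import numpy as np

    grids = np.arange(4)      # stand-in for self.grids
    mask = np.array([1, 0, 1, 1], dtype='int32')  # overlap flags for gi=0
    gi = 0
    m = mask.astype("bool")
    m[gi] = False             # a grid does not overlap itself
    grids[gi:][m[gi:]]        # -> array([2, 3])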


https://bitbucket.org/yt_analysis/yt-3.0/changeset/dc65a4be94ad/
changeset:   dc65a4be94ad
branch:      yt
user:        MatthewTurk
date:        2012-09-25 20:02:02
summary:     Adding a skeleton of a frontend.
affected #:  8 files

diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends.skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+

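A minimal sketch of what a concrete frontend's _parse_hierarchy might
fill in, assuming the grid metadata has already been read into the lists
left_edges, right_edges, dims, and levels (all names here illustrative):

    import numpy as np

    def _parse_hierarchy(self):
        # left_edges, right_edges, dims, levels: read by the frontend
        self.grid_left_edge[:] = np.array(left_edges, dtype='float64')
        self.grid_right_edge[:] = np.array(right_edges, dtype='float64')
        self.grid_dimensions[:] = np.array(dims, dtype='int32')
        self.grid_levels[:] = np.array(levels, dtype='int32').reshape(-1, 1)
        self.grid_particle_count[:] = 0
        self.grids = np.empty(self.num_grids, dtype='object')
        for i in range(self.num_grids):
            self.grids[i] = self.grid(i, self, levels[i])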



diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field.  The conversion closure looks
+# up the named conversion factor on the parameter file at read time:
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")

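Unrolled for a single field, the translation loop above amounts to the
following (the take_log and display_name values are illustrative):

    add_skeleton_field("dens", function=NullFunc, take_log=False,
                       validators=[ValidateDataField("dens")])
    add_field("Density", TranslationFunc("dens"),
              take_log=True, display_name="Density")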

diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass

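A minimal sketch of a working _read_data_set for an HDF5-backed format,
assuming one dataset per grid per field at /grid%04i/<field> (the layout
is illustrative, not a real on-disk convention):

    import h5py

    def _read_data_set(self, grid, field):
        f = h5py.File(grid.filename, "r")
        data = f["/grid%04i/%s" % (grid.id, field)][:]
        f.close()
        return data.astype("float64")  # shape == grid.ActiveDimensions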



diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ae3f6a9e07a1/
changeset:   ae3f6a9e07a1
branch:      yt
user:        ngoldbaum
date:        2012-09-21 21:04:07
summary:     Fixing dz for 2D AMR data; the dz of a child grid should be the same as its parent's in 2D.
affected #:  1 file

diff -r 664d83ff5ace2571cd77516ec3c0cf5fb35da771 -r ae3f6a9e07a18644470391eaa684434ffc14bb8a yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,6 +210,8 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ab03f817518b/
changeset:   ab03f817518b
branch:      yt
user:        xarthisius
date:        2012-09-25 16:09:04
summary:     [clump_handling] don't try to pass instructions to nonexistent children
affected #:  1 file

diff -r ae3f6a9e07a18644470391eaa684434ffc14bb8a -r ab03f817518bb824bc899f88f59bdeb8d4292ddb yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bc8ce6c86eb2/
changeset:   bc8ce6c86eb2
branch:      yt
user:        MatthewTurk
date:        2012-09-25 20:02:11
summary:     Merge
affected #:  2 files

diff -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 -r bc8ce6c86eb2f1be8679b8a3a7bf582100083115 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 


diff -r dc65a4be94adab993c0ad6a8d4e9daa638d7e801 -r bc8ce6c86eb2f1be8679b8a3a7bf582100083115 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,6 +210,8 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d3f1fbececb6/
changeset:   d3f1fbececb6
branch:      yt
user:        scopatz
date:        2012-09-25 22:26:05
summary:     Added fix for #428
affected #:  1 file

diff -r ae3f6a9e07a18644470391eaa684434ffc14bb8a -r d3f1fbececb63a7db41d46f496791ba41bb88e32 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -853,11 +853,15 @@
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14        
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.


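The fix for #428 is the usual try/except import fallback; in isolation it
reads as below (a sketch that assumes only that one of the two IPython
layouts is installed):

    def get_display():
        try:
            # IPython 0.13 and earlier
            from IPython.zmq.pylab.backend_inline import send_figure as display
        except ImportError:
            # IPython 0.14 and later
            from IPython.core.display import display
        return display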

https://bitbucket.org/yt_analysis/yt-3.0/changeset/dfe2b9787bc7/
changeset:   dfe2b9787bc7
branch:      yt
user:        scopatz
date:        2012-09-25 22:26:45
summary:     merged upstream changes.
affected #:  9 files

diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 


diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+


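As a concrete illustration of the hooks above, a hypothetical frontend
whose output is one uniform root grid might fill in _count_grids and
_parse_hierarchy as follows (every detail here is an assumption for
illustration, not part of the commit):

    def _count_grids(self):
        self.num_grids = 1

    def _parse_hierarchy(self):
        self.grid_left_edge[0, :] = self.pf.domain_left_edge
        self.grid_right_edge[0, :] = self.pf.domain_right_edge
        self.grid_dimensions[0, :] = self.pf.domain_dimensions
        self.grid_particle_count[0, 0] = 0
        self.grid_levels[0, 0] = 0
        self.grids = np.array([self.grid(0, self, 0)], dtype="object")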


diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field:
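+# (note: the convert_function below assumes a _get_convert(field) helper
+# returning a conversion function; no such helper is defined in this
+# skeleton file, so a real frontend must supply its own)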
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")


diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass

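A hypothetical filling-in of _read_data_set for an HDF5 layout in which
each field lives at /grid_XXXX/<field> (the path scheme is an assumption,
purely for illustration):

    def _read_data_set(self, grid, field):
        with h5py.File(grid.filename, "r") as f:
            # dataset path below is illustrative only
            return f["/grid_%04i/%s" % (grid.id, field)][:].astype("float64")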



diff -r d3f1fbececb63a7db41d46f496791ba41bb88e32 -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


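For reference, a parent package would pick this module up through
numpy.distutils; a sketch of a hypothetical frontends-level configuration:

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('frontends', parent_package, top_path)
        config.add_subpackage("_skeleton")  # runs _skeleton/setup.py
        return config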

https://bitbucket.org/yt_analysis/yt-3.0/changeset/2fcc03b82af1/
changeset:   2fcc03b82af1
branch:      yt
user:        MatthewTurk
date:        2012-09-26 16:45:02
summary:     Porting over the test_interpolators code from 3.0
affected #:  1 file

diff -r dfe2b9787bc7b39b6a82bc904fee5d3967502c37 -r 2fcc03b82af1282e1c410add0059c492d44a7e81 yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,13 @@
+import numpy as np
+from numpy.testing import assert_array_equal
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+
+def test_linear_interpolator():
+    random_data = np.random.random(128)
+    x = {"Random":np.mgrid[0.0:1.0:128j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "Random", True)
+    assert_array_equal(ufi(x), random_data)



https://bitbucket.org/yt_analysis/yt-3.0/changeset/d71aa73bd6ac/
changeset:   d71aa73bd6ac
branch:      yt
user:        MatthewTurk
date:        2012-09-26 16:59:53
summary:     Adding bi- and tri-linear interpolator tests.
affected #:  1 file

diff -r 2fcc03b82af1282e1c410add0059c492d44a7e81 -r d71aa73bd6ac48fd11b21aa90656fb7ded769395 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -5,9 +5,24 @@
 def setup():
     pass
 
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
 
-def test_linear_interpolator():
-    random_data = np.random.random(128)
-    x = {"Random":np.mgrid[0.0:1.0:128j]}
-    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "Random", True)
-    assert_array_equal(ufi(x), random_data)
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)


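Outside the test suite the same API interpolates a table at arbitrary
points; a minimal sketch using exactly the signatures exercised above
(the trailing True flag is passed through as in the tests):

    import numpy as np
    import yt.utilities.linear_interpolators as lin

    table = np.random.random((64, 64))   # values sampled on the unit square
    bfi = lin.BilinearFieldInterpolator(table, (0.0, 1.0, 0.0, 1.0), "xy", True)
    pts = {"x": np.array([0.1, 0.5, 0.9]),
           "y": np.array([0.2, 0.5, 0.8])}
    vals = bfi(pts)                       # one value per (x, y) pair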

https://bitbucket.org/yt_analysis/yt-3.0/changeset/182debc46fdb/
changeset:   182debc46fdb
branch:      yt
user:        MatthewTurk
date:        2012-09-26 17:04:33
summary:     Adding in Anthony's testing helper module
affected #:  1 file

diff -r d71aa73bd6ac48fd11b21aa90656fb7ded769395 -r 182debc46fdba2fe1aae830bfa136e7d4f68f27f yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,127 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This a sequence of length 2*ndims that is the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence (of
+        length ndims), then each dimension will be refined down to this level.
+        All values in this array must be the same or zero.  A zero-valued
+        dimension indicates that this dimension should not be refined.  Taking
+        the 3D cylindrical example above, if we don't want to refine theta but
+        want r and z refined down to level 5, we would set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level



https://bitbucket.org/yt_analysis/yt-3.0/changeset/db90f5fd1742/
changeset:   db90f5fd1742
branch:      yt
user:        MatthewTurk
date:        2012-09-26 17:20:57
summary:     Adding a fake_random_pf to yt/testing.py, and starting to add some refinement
criteria.
affected #:  4 files

diff -r 182debc46fdba2fe1aae830bfa136e7d4f68f27f -r db90f5fd17420a0d36e816b82b50695358de52d1 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -23,7 +23,8 @@
 """
 
 import numpy as np
-
+from yt.funcs import *
+from numpy.testing import assert_array_equal
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -125,3 +126,18 @@
         right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
 
     return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0)
+    return ug

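A sketch of how fake_random_pf is meant to be used in a test (assumes a
working yt install; negative=True shifts the random values to be centered
on zero):

    from yt.testing import fake_random_pf

    pf = fake_random_pf(64, fields=("Density",), negative=True)
    pf.h                              # force hierarchy construction
    dd = pf.h.all_data()
    assert dd["Density"].min() < 0.0  # negative values now occur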

diff -r 182debc46fdba2fe1aae830bfa136e7d4f68f27f -r db90f5fd17420a0d36e816b82b50695358de52d1 yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, pf, grid):
+        rho = grid["Density"] / (pf.refine_by**grid.Level)
+        return (rho > self.over_density)

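Because the metaclass registers every subclass that defines _type_name,
adding a new criterion is a single class definition; a hypothetical
example in the same shape as OverDensity:

    class TemperatureJump(FlaggingMethod):
        _type_name = "temperature_jump"
        def __init__(self, ratio):
            self.ratio = ratio

        def __call__(self, pf, grid):
            t = grid["Temperature"]
            return (t / t.mean()) > self.ratio

    # flagging_method_registry["temperature_jump"] now maps to this class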

diff -r 182debc46fdba2fe1aae830bfa136e7d4f68f27f -r db90f5fd17420a0d36e816b82b50695358de52d1 yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 182debc46fdba2fe1aae830bfa136e7d4f68f27f -r db90f5fd17420a0d36e816b82b50695358de52d1 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,5 +1,4 @@
-import numpy as np
-from numpy.testing import assert_array_equal
+from yt.testing import *
 import yt.utilities.linear_interpolators as lin
 
 def setup():



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0527d1fab20f/
changeset:   0527d1fab20f
branch:      yt
user:        Andrew Myers
date:        2012-09-20 07:45:50
summary:     reading in dds for each grid from the simulation data dump instead of calculating it
affected #:  5 files

diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 0527d1fab20f743137c8173243ef26938a83f6ec yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 0527d1fab20f743137c8173243ef26938a83f6ec yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 0527d1fab20f743137c8173243ef26938a83f6ec yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 0527d1fab20f743137c8173243ef26938a83f6ec yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 2120043a7851f77c41c67f5301640528cc8b5314 -r 0527d1fab20f743137c8173243ef26938a83f6ec yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


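The Chombo hunk above trades a per-grid computation for a per-level
lookup table built once while parsing; the pattern in isolation (a
sketch with made-up numbers):

    import numpy as np

    refine_by = 2
    dds_list = []
    dx = 1.0 / 64                    # root-level cell width
    for level in range(4):           # one entry per refinement level
        dds_list.append(dx * np.ones(3))
        dx /= refine_by

    # each grid then indexes by its level, as the diffs above do:
    #     self.dds = self.hierarchy.dds_list[self.Level]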

https://bitbucket.org/yt_analysis/yt-3.0/changeset/a16fc24a1c04/
changeset:   a16fc24a1c04
branch:      yt
user:        MatthewTurk
date:        2012-09-26 17:34:17
summary:     Merged in atmyers/yt (pull request #275)
affected #:  5 files

diff -r db90f5fd17420a0d36e816b82b50695358de52d1 -r a16fc24a1c04fc9d88e0c238bd85b10275743097 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r db90f5fd17420a0d36e816b82b50695358de52d1 -r a16fc24a1c04fc9d88e0c238bd85b10275743097 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r db90f5fd17420a0d36e816b82b50695358de52d1 -r a16fc24a1c04fc9d88e0c238bd85b10275743097 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r db90f5fd17420a0d36e816b82b50695358de52d1 -r a16fc24a1c04fc9d88e0c238bd85b10275743097 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r db90f5fd17420a0d36e816b82b50695358de52d1 -r a16fc24a1c04fc9d88e0c238bd85b10275743097 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/6d7c8a1b1ab1/
changeset:   6d7c8a1b1ab1
branch:      yt
user:        MatthewTurk
date:        2012-09-26 19:25:34
summary:     Switching int for offset1 and offset2 in the read_and_seek function (Boxlib,
Nyx) to np.int64_t
affected #:  1 file

diff -r a16fc24a1c04fc9d88e0c238bd85b10275743097 -r 6d7c8a1b1ab123f6a8a71891fbcee2c4d1c6d1c9 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]


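The motivation for the type change: a C int is 32 bits on most platforms,
so a byte offset into a file larger than 2 GiB wraps around, while
np.int64_t does not. A quick check:

    import numpy as np

    offset = np.int64(3) * 1024**3          # 3 GiB into a file
    assert offset > np.iinfo(np.int32).max  # would overflow a 32-bit int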

https://bitbucket.org/yt_analysis/yt-3.0/changeset/8fcee78bcc84/
changeset:   8fcee78bcc84
branch:      yt
user:        ngoldbaum
date:        2012-09-26 21:25:45
summary:     Adding a comment in orientation.py to clarify the possibly confusing way we've
handled north_vector.
affected #:  1 file

diff -r ae3f6a9e07a18644470391eaa684434ffc14bb8a -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -67,6 +67,8 @@
             t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
             east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None, otherwise rotations about a fixed axis will break.
+            # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
@@ -82,7 +84,7 @@
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------

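The construction the new comment documents, reproduced standalone in
plain numpy (a sketch; the normal vector is an arbitrary example):

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    vecs = np.identity(3)
    t = np.cross(normal, vecs).sum(axis=1)
    ax = t.argmax()
    east = np.cross(vecs[ax, :], normal).ravel()
    north = np.cross(normal, east).ravel()  # computed here, but self.north_vector stays None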


https://bitbucket.org/yt_analysis/yt-3.0/changeset/e6915de3e4eb/
changeset:   e6915de3e4eb
branch:      yt
user:        ngoldbaum
date:        2012-09-26 21:28:21
summary:     Merging.
affected #:  20 files

diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
 


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+




diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field (_get_convert is sketched after this diff):
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")
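
Note that _get_convert is not defined anywhere in this skeleton file.  In
other yt frontends of this vintage (e.g. FLASH) it is a small closure over
data.convert; something along these lines would make the example above work:

    def _get_convert(fname):
        def _conv(data):
            return data.convert(fname)
        return _conv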


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions,
+        # that corresponds to 'field'.  (A sketch follows this diff.)
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass
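
A minimal sketch of a working pair of methods, assuming an HDF5 layout of
"/grid_%08i/<field>" and that grids carry a .filename attribute -- both
assumptions for illustration, not part of the skeleton:

    def _read_data_set(self, grid, field):
        f = h5py.File(grid.filename, "r")
        data = f["/grid_%08i/%s" % (grid.id, field)][:].astype("float64")
        f.close()
        return data

    def _read_data_slice(self, grid, field, axis, coord):
        # Slice a full read along the requested axis at the given coordinate.
        sl = [slice(None), slice(None), slice(None)]
        sl[axis] = slice(coord, coord + 1)
        return self._read_data_set(grid, field)[tuple(sl)]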




diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz have already been read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dds for this level has already been read in and stored in the hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz have already been read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz have already been read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz have already been read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,143 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This is a sequence of length 2*ndims giving the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence (of
+        length ndims), each dimension will be refined down to the corresponding
+        level.  All values in this sequence must be equal or zero.  A zero-valued
+        dimension indicates that this dimension should not be refined.  Taking the
+        3D cylindrical example above, if we do not want to refine theta but want
+        r and z refined to level 5, we would set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0)
+    return ug
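
A quick sketch exercising the two helpers above (values illustrative):

    from yt.testing import amrspace, fake_random_pf
    left, right, level = amrspace([0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
                                  levels=(3, 3, 3), cells=4)
    pf = fake_random_pf(32, fields=("Density", "Temperature"))
    pf.h  # instantiate the hierarchy before touching pf.h.grids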


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid.pf, grid)  # flagging methods take (pf, grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, pf, grid):
+        rho = grid["Density"] / (pf.refine_by**grid.Level)
+        return (rho > self.over_density)
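
Since the __metaclass__ registers any subclass that defines _type_name, adding
a new criterion is just a subclass away.  A sketch ("min_temperature" is an
invented example; Python 2 idiom, matching the code above):

    from yt.utilities.flagging_methods import \
        FlaggingMethod, flagging_method_registry

    class MinTemperature(FlaggingMethod):
        _type_name = "min_temperature"
        def __init__(self, temp):
            self.temp = temp

        def __call__(self, pf, grid):
            return (grid["Temperature"] < self.temp)

    # The registry now knows about it:
    # flagging_method_registry["min_temperature"]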


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)


diff -r 8fcee78bcc842c3dc21bf4eb796f1fd62bc97778 -r e6915de3e4eb9416758acf2203d281c71319b40b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -853,11 +853,15 @@
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14        
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.



https://bitbucket.org/yt_analysis/yt-3.0/changeset/5af7cb598179/
changeset:   5af7cb598179
branch:      yt
user:        ngoldbaum
date:        2012-09-26 21:47:04
summary:     Fixing the case when north_vector is not defined by the user.
affected #:  1 file

diff -r e6915de3e4eb9416758acf2203d281c71319b40b -r 5af7cb598179b93a2162b6aa8605d70cb2776b90 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -195,7 +195,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -282,7 +282,7 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
@@ -587,7 +587,7 @@
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
+        north_vector = self.orienter.unit_vectors[1]
         self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2067e8120c89/
changeset:   2067e8120c89
branch:      yt
user:        Andrew Myers
date:        2012-09-27 01:43:22
summary:     a couple of fixes needed to get velocity callbacks to work with off-axis slices
affected #:  2 files

diff -r 0527d1fab20f743137c8173243ef26938a83f6ec -r 2067e8120c892e3edcdbdb363cc92a678dc2c722 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -650,8 +650,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)


diff -r 0527d1fab20f743137c8173243ef26938a83f6ec -r 2067e8120c892e3edcdbdb363cc92a678dc2c722 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -103,7 +103,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -179,7 +182,6 @@
     (normal,perp1,perp2) = ortho_find(normal)
     mat = np.transpose(np.column_stack((perp1,perp2,normal)))
     center = np.dot(mat,center)
-    width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     



https://bitbucket.org/yt_analysis/yt-3.0/changeset/2359ab1ceea9/
changeset:   2359ab1ceea9
branch:      yt
user:        ngoldbaum
date:        2012-09-27 01:59:21
summary:     Merged in atmyers/yt (pull request #282)
affected #:  2 files

diff -r 5af7cb598179b93a2162b6aa8605d70cb2776b90 -r 2359ab1ceea99e0db1f47adcc45ae00f50eaed12 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -650,8 +650,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)


diff -r 5af7cb598179b93a2162b6aa8605d70cb2776b90 -r 2359ab1ceea99e0db1f47adcc45ae00f50eaed12 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -106,7 +106,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -182,7 +185,6 @@
     (normal,perp1,perp2) = ortho_find(normal)
     mat = np.transpose(np.column_stack((perp1,perp2,normal)))
     center = np.dot(mat,center)
-    width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     



https://bitbucket.org/yt_analysis/yt-3.0/changeset/9ba1eef6cba4/
changeset:   9ba1eef6cba4
branch:      yt
user:        Andrew Myers
date:        2012-09-27 22:30:17
summary:     porting over the magnetic field callback for off-axis slices
affected #:  3 files

diff -r 2067e8120c892e3edcdbdb363cc92a678dc2c722 -r 9ba1eef6cba44db82444d12f9887d8ef5026ec18 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -938,6 +938,25 @@
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")


diff -r 2067e8120c892e3edcdbdb363cc92a678dc2c722 -r 9ba1eef6cba44db82444d12f9887d8ef5026ec18 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +


diff -r 2067e8120c892e3edcdbdb363cc92a678dc2c722 -r 9ba1eef6cba44db82444d12f9887d8ef5026ec18 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -146,7 +146,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])



https://bitbucket.org/yt_analysis/yt-3.0/changeset/c6fd19e0f399/
changeset:   c6fd19e0f399
branch:      yt
user:        Andrew Myers
date:        2012-09-28 00:53:30
summary:     fixing the streamline callback to convert from pixel to data units
affected #:  1 file

diff -r 9ba1eef6cba44db82444d12f9887d8ef5026ec18 -r c6fd19e0f3998f41935fdabf2fe412621827033b yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -434,6 +434,9 @@
             iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
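
The two added lines are the usual affine map from pixel indices to data
coordinates, with (xx0, xx1, yy0, yy1) the plot bounds:

    x_data = x_pix * (xx1 - xx0) / nx + xx0
    y_data = y_pix * (yy1 - yy0) / ny + yy0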



https://bitbucket.org/yt_analysis/yt-3.0/changeset/80b349aff943/
changeset:   80b349aff943
branch:      yt
user:        ngoldbaum
date:        2012-09-28 03:11:16
summary:     Merged in atmyers/yt (pull request #283)
affected #:  3 files

diff -r 2359ab1ceea99e0db1f47adcc45ae00f50eaed12 -r 80b349aff94364fda92e7a5623d2f6a6955fe8a0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -938,6 +938,25 @@
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")


diff -r 2359ab1ceea99e0db1f47adcc45ae00f50eaed12 -r 80b349aff94364fda92e7a5623d2f6a6955fe8a0 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +


diff -r 2359ab1ceea99e0db1f47adcc45ae00f50eaed12 -r 80b349aff94364fda92e7a5623d2f6a6955fe8a0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -146,7 +146,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -432,6 +434,9 @@
             iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],



https://bitbucket.org/yt_analysis/yt-3.0/changeset/7e4f7124641b/
changeset:   7e4f7124641b
branch:      yt
user:        xarthisius
date:        2012-10-01 11:36:00
summary:     [image_write:write_bitmap] non-square matrices require swapaxes instead of transpose
affected #:  1 file

diff -r 80b349aff94364fda92e7a5623d2f6a6955fe8a0 -r 7e4f7124641b4852eb6666f052183053fcca7daf yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -152,8 +152,7 @@
         alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
         bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:
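
A short demonstration of why the per-channel transpose fails for non-square
images while swapaxes does not (shapes illustrative):

    import numpy as np
    bitmap = np.zeros((4, 8, 4), dtype='uint8')  # H=4, W=8, RGBA
    # bitmap[:,:,0] = bitmap[:,:,0].T would raise: an (8, 4) transpose
    # cannot be assigned into a (4, 8) slice.  Swapping the first two axes
    # of the whole buffer reorients the image in one step:
    flipped = bitmap.swapaxes(0, 1)
    assert flipped.shape == (8, 4, 4)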



https://bitbucket.org/yt_analysis/yt-3.0/changeset/3434ef07332a/
changeset:   3434ef07332a
branch:      yt
user:        samskillman
date:        2012-10-01 22:38:24
summary:     HEALpix was only working with the VolumeRenderSampler, and not the ProjectionSampler.
affected #:  1 file

diff -r e6915de3e4eb9416758acf2203d281c71319b40b -r 3434ef07332a3c54f5c2bf6a8b461694f47469e5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -720,6 +720,9 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +736,12 @@
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples



https://bitbucket.org/yt_analysis/yt-3.0/changeset/e56c3f9dca02/
changeset:   e56c3f9dca02
branch:      yt
user:        jwise77
date:        2012-10-01 22:46:16
summary:     Merged in samskillman/yt (pull request #285)
affected #:  1 file

diff -r 7e4f7124641b4852eb6666f052183053fcca7daf -r e56c3f9dca02e663bddc24fdd04a30bff793788b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -720,6 +720,9 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +736,12 @@
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples



https://bitbucket.org/yt_analysis/yt-3.0/changeset/de5451397981/
changeset:   de5451397981
branch:      yt
user:        ngoldbaum
date:        2012-10-02 01:39:33
summary:     Fixing the way magnetic fields are handled in the FLASH frontend.  This explicitly corrects for the user's choice of coordinate system.  Adding some useful universal fields that depend on magnetic quantities.  Fixing a minor typo in the chombo frontend.  Closes #429
affected #:  3 files

diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r de5451397981bca072e120ea53540e571ff56669 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -994,12 +994,37 @@
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    MagneticEnergy field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units="\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    MagneticEnergy field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units="\rm{ergs}\/\rm{cm}^{-3}")
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
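
In Gaussian-cgs units (with B in Gauss), the quantities added above are

    MagneticEnergy   = (Bx^2 + By^2 + Bz^2) / (8*pi)   [erg/cm^3]
    MagneticPressure = MagneticEnergy
    PlasmaBeta       = Pressure / MagneticEnergy

which is why the earlier B^2/2 expression needed the 8*pi correction.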


diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r de5451397981bca072e120ea53540e571ff56669 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -101,17 +101,17 @@
 def _Bx(field,data):
     return data["X-magnfield"]
 add_field("Bx", function=_Bx, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B")
+          units=r"\rm{Gauss}", display_name=r"B_x")
 
 def _By(field,data):
     return data["Y-magnfield"]
 add_field("By", function=_By, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B")
+          units=r"\rm{Gauss}", display_name=r"B_y")
 
 def _Bz(field,data):
     return data["Z-magnfield"]
 add_field("Bz", function=_Bz, take_log=False,
-          units=r"\rm{Gauss}", display_name=r"B")
+          units=r"\rm{Gauss}", display_name=r"B_z")
 
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +


diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r de5451397981bca072e120ea53540e571ff56669 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -254,3 +255,43 @@
 
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
+
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+



https://bitbucket.org/yt_analysis/yt-3.0/changeset/bbc25a5eaac8/
changeset:   bbc25a5eaac8
branch:      yt
user:        MatthewTurk
date:        2012-10-02 20:22:33
summary:     Re-enabling star particle rendering in volume rendering.  Needs some finessing,
but for now, setting star_trees on a camera to a list corresponding to grids will work.
affected #:  2 files

diff -r de5451397981bca072e120ea53540e571ff56669 -r bbc25a5eaac852d67c4b17d1c65431876555623a yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -656,19 +657,21 @@
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
             gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):


diff -r de5451397981bca072e120ea53540e571ff56669 -r bbc25a5eaac852d67c4b17d1c65431876555623a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -301,7 +301,11 @@
                 np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
@@ -312,9 +316,10 @@
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
+        print sampler, kwargs
         return sampler
 
     def finalize_image(self, image):
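
A sketch of the interim hook the summary describes (build_star_tree is a
hypothetical helper; assume center c, normal L, width W, and transfer
function tf are defined as usual):

    cam = pf.h.camera(c, L, W, (512, 512), tf)
    cam.star_trees = [build_star_tree(g) for g in pf.h.grids]
    image = cam.snapshot("stars.png")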



https://bitbucket.org/yt_analysis/yt-3.0/changeset/8fac44601bf5/
changeset:   8fac44601bf5
branch:      yt
user:        ngoldbaum
date:        2012-09-24 22:44:20
summary:     Adding OffAxisProjectionPlot.  Doesn't work yet.
affected #:  1 file

diff -r ae3f6a9e07a18644470391eaa684434ffc14bb8a -r 8fac44601bf5deba62b2f6452308897ed295a6f3 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1084,6 +1084,64 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
+class OffAxisProjectionPlot(PWViewerMPL):
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, weight_field=None, max_level=None, 
+                 north_vector=None, volume=None, no_ghost=False, le=None,
+                 re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewrMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed.
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level : int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
+        # Hard-coding the resolution for now
+        projcam = ProjectionCamera(center, normal_vector, width, (800,800), fields,
+                                   weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                   le=le, re=re, north_vector=north_vector, pf=pf, 
+                                   interpolated=interpolated)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,projcam,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
 _metadata_template = """
 %(pf)s<br><br>
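
The intended call signature, as a sketch (per the summary, this does not work
yet; "galaxy0030" is a placeholder dataset name):

    from yt.mods import *
    pf = load("galaxy0030")
    prj = OffAxisProjectionPlot(pf, [1.0, 0.5, 0.2], "Density",
                                center='c', width=(1, 'unitary'))
    prj.save()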



https://bitbucket.org/yt_analysis/yt-3.0/changeset/79743892acbc/
changeset:   79743892acbc
branch:      yt
user:        ngoldbaum
date:        2012-09-26 01:35:44
summary:     First pass at OffAxisProjectionPlot.
affected #:  3 files

diff -r 8fac44601bf5deba62b2f6452308897ed295a6f3 -r 79743892acbc5d49bbc5067db8077e349a745e1f yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -122,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r 8fac44601bf5deba62b2f6452308897ed295a6f3 -r 79743892acbc5d49bbc5067db8077e349a745e1f yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r 8fac44601bf5deba62b2f6452308897ed295a6f3 -r 79743892acbc5d49bbc5067db8077e349a745e1f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -47,6 +47,7 @@
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
+from .volume_rendering.api import off_axis_projection
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
 
@@ -157,7 +158,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -168,6 +169,13 @@
         width = (width, width)
     Wx, Wy = width
     width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -182,10 +190,13 @@
     (normal,perp1,perp2) = ortho_find(normal)
     mat = np.transpose(np.column_stack((perp1,perp2,normal)))
     center = np.dot(mat,center)
-    width = width/pf.domain_width.min()
+    width = width
+    
+    if width.shape == (2,):
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
 
 class PlotWindow(object):
@@ -247,19 +258,24 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
-            bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
+        bounds = self.xlim+self.ylim
+        class_name = self.data_source.__class__.__name__
+        if 'OffAxisProjection' in class_name:
+            self._frb = OffAxisProjectionDummyFRB(self.data_source,
+                                                  bounds, self.buff_size,
+                                                  self.antialias,
                                                   periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
+        elif 'Cutting' in class_name:
+            self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
+                                                     bounds, self.buff_size, 
+                                                     self.antialias, 
+                                                     periodic=self._periodic)
+        elif 'Projection' in class_name or 'Slice' in class_name:
+            self._frb = FixedResolutionBuffer(self.data_source, 
+                                              bounds, self.buff_size, 
+                                              self.antialias, 
+                                              periodic=self._periodic)
+        else:
             raise RuntimeError("Failed to repixelize.")
         if old_fields is None:
             self._frb._get_data_source_fields()
@@ -836,7 +852,10 @@
         if 'Slice' in self.data_source.__class__.__name__:
             type = 'Slice'
         if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
+            if 'OffAxis' in self.data_source.__class__.__name__:
+                type = 'OffAxisProjection'
+            else:
+                type = 'Projection'
             weight = self.data_source.weight_field
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
@@ -1084,11 +1103,55 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
+class OffAxisProjectionDummyFRB(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
+                 periodic = False):
+        self.internal_dict = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        try:
+            image = self.internal_dict[item]
+        except KeyError:
+            ds = self.data_source
+            image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                        ds.width, ds.resolution, item,
+                                        weight=ds.weight_field, volume=ds.volume,
+                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated)
+            self.internal_dict[item] = image
+        return image
+    
+    def _get_data_source_fields(self):
+        for f in self.data_source.fields:
+            self[f] = None
+
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
 class OffAxisProjectionPlot(PWViewerMPL):
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, weight_field=None, max_level=None, 
-                 north_vector=None, volume=None, no_ghost=False, le=None,
-                 re=None, interpolated=False):
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1116,6 +1179,10 @@
             A tuple containing the width of the image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
         weight_field : string
             The name of the weighting field.  Set to None for no weight.
         max_level: int
@@ -1131,15 +1198,17 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
+        self.OffAxisProjection = True
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
         # Hard-coding the resolution for now
-        projcam = ProjectionCamera(center, normal_vector, width, (800,800), fields,
-                                   weight=weight_field,  volume=volume, no_ghost=no_ghost,
-                                   le=le, re=re, north_vector=north_vector, pf=pf, 
-                                   interpolated=interpolated)
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,projcam,bounds,origin='center-window',periodic=False,oblique=True)
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """


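A minimal usage sketch of the OffAxisProjectionPlot added in this changeset, assuming a loadable dataset; the path "RedshiftOutput0005" is hypothetical and the keyword values are illustrative:

    from yt.mods import load
    from yt.visualization.plot_window import OffAxisProjectionPlot

    pf = load("RedshiftOutput0005")  # hypothetical dataset path
    # Project Density along an arbitrary normal, through half the
    # domain, using the depth keyword introduced by this commit.
    plot = OffAxisProjectionPlot(pf, [1.0, 1.0, 0.0], "Density",
                                 center='c', width=(0.8, 'unitary'),
                                 depth=(0.5, 'unitary'),
                                 weight_field="Density")
    plot.save()

At this point in the series the image resolution is still hard-coded to (800, 800), as the in-line comment in the diff notes.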

https://bitbucket.org/yt_analysis/yt-3.0/changeset/43ac69da0542/
changeset:   43ac69da0542
branch:      yt
user:        ngoldbaum
date:        2012-10-02 01:55:27
summary:     Merging.
affected #:  3 files

diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r 43ac69da054290c81e42fdc9d2473e743559e173 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -122,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r 43ac69da054290c81e42fdc9d2473e743559e173 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r e56c3f9dca02e663bddc24fdd04a30bff793788b -r 43ac69da054290c81e42fdc9d2473e743559e173 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -47,6 +47,7 @@
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
+from .volume_rendering.api import off_axis_projection
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
 
@@ -160,7 +161,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -171,6 +172,13 @@
         width = (width, width)
     Wx, Wy = width
     width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (depth,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -185,9 +193,13 @@
     (normal,perp1,perp2) = ortho_find(normal)
     mat = np.transpose(np.column_stack((perp1,perp2,normal)))
     center = np.dot(mat,center)
+    width = width
+    
+    if width.shape == (2,):
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
 
 class PlotWindow(object):
@@ -249,19 +261,24 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
-            bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
+        bounds = self.xlim+self.ylim
+        class_name = self.data_source.__class__.__name__
+        if 'OffAxisProjection' in class_name:
+            self._frb = OffAxisProjectionDummyFRB(self.data_source,
+                                                  bounds, self.buff_size,
+                                                  self.antialias,
                                                   periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
+        elif 'Cutting' in class_name:
+            self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
+                                                     bounds, self.buff_size, 
+                                                     self.antialias, 
+                                                     periodic=self._periodic)
+        elif 'Projection' in class_name or 'Slice' in class_name:
+            self._frb = FixedResolutionBuffer(self.data_source, 
+                                              bounds, self.buff_size, 
+                                              self.antialias, 
+                                              periodic=self._periodic)
+        else:
             raise RuntimeError("Failed to repixelize.")
         if old_fields is None:
             self._frb._get_data_source_fields()
@@ -838,7 +855,10 @@
         if 'Slice' in self.data_source.__class__.__name__:
             type = 'Slice'
         if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
+            if 'OffAxis' in self.data_source.__class__.__name__:
+                type = 'OffAxisProjection'
+            else:
+                type = 'Projection'
             weight = self.data_source.weight_field
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
@@ -1090,6 +1110,114 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
+class OffAxisProjectionDummyFRB(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
+                 periodic = False):
+        self.internal_dict = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        try:
+            image = self.internal_dict[item]
+        except KeyError:
+            ds = self.data_source
+            image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                        ds.width, ds.resolution, item,
+                                        weight=ds.weight_field, volume=ds.volume,
+                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated)
+            self.internal_dict[item] = image
+        return image
+    
+    def _get_data_source_fields(self):
+        for f in self.data_source.fields:
+            self[f] = None
+
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two- or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of the image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level: int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        self.OffAxisProjection = True
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+        # Hard-coding the resolution for now
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
 _metadata_template = """
 %(pf)s<br><br>


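The depth handling merged here parses a (value, "unit") tuple into code units and appends it to the two-element width array, which is what later selects the six-element bounds. A standalone sketch of that logic, with a dict standing in for the parameter file's unit lookup (the helper name _parse_depth is ours, not yt's):

    import numpy as np

    def iterable(obj):
        # Mirrors yt.funcs.iterable: anything with a length counts.
        try:
            len(obj)
        except TypeError:
            return False
        return True

    def _parse_depth(depth, pf):
        if iterable(depth) and isinstance(depth[1], str):
            d, unit = depth
            return d / pf[unit]          # convert to code units
        elif iterable(depth):
            raise RuntimeError("Depth must be a float or a (depth, \"unit\") tuple")
        return depth                     # bare float: already code units

    pf = {'unitary': 1.0}                # stand-in for pf[unit] lookup
    width = np.array([0.8, 0.8])
    width = np.append(width, _parse_depth((0.5, 'unitary'), pf))
    assert width.shape == (3,)           # triggers the six-element bounds branch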

https://bitbucket.org/yt_analysis/yt-3.0/changeset/b2263e35caa7/
changeset:   b2263e35caa7
branch:      yt
user:        ngoldbaum
date:        2012-10-02 02:32:21
summary:     Implementing Matt's suggestions.  See PR 281.
affected #:  2 files

diff -r 43ac69da054290c81e42fdc9d2473e743559e173 -r b2263e35caa7eeaf80daace4b177229c4647936e yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 import _MPL
 import numpy as np
 import weakref
@@ -384,3 +385,24 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolution buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   ds.width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated)
+        self[item] = buff
+        return buff
+
+


diff -r 43ac69da054290c81e42fdc9d2473e743559e173 -r b2263e35caa7eeaf80daace4b177229c4647936e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -43,11 +43,11 @@
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
-from .volume_rendering.api import off_axis_projection
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
 
@@ -262,24 +262,10 @@
         if self._frb is not None:
             old_fields = self._frb.keys()
         bounds = self.xlim+self.ylim
-        class_name = self.data_source.__class__.__name__
-        if 'OffAxisProjection' in class_name:
-            self._frb = OffAxisProjectionDummyFRB(self.data_source,
-                                                  bounds, self.buff_size,
-                                                  self.antialias,
-                                                  periodic=self._periodic)
-        elif 'Cutting' in class_name:
-            self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                     bounds, self.buff_size, 
-                                                     self.antialias, 
-                                                     periodic=self._periodic)
-        elif 'Projection' in class_name or 'Slice' in class_name:
-            self._frb = FixedResolutionBuffer(self.data_source, 
-                                              bounds, self.buff_size, 
-                                              self.antialias, 
-                                              periodic=self._periodic)
-        else:
-            raise RuntimeError("Failed to repixelize.")
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -852,16 +838,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            if 'OffAxis' in self.data_source.__class__.__name__:
-                type = 'OffAxisProjection'
-            else:
-                type = 'Projection'
-            weight = self.data_source.weight_field
-        if 'Cutting' in self.data_source.__class__.__name__:
-            type = 'OffAxisSlice'
+        type = self._plot_type
         names = []
         for k, v in self.plots.iteritems():
             if axis:
@@ -909,6 +886,9 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window'):
         r"""Creates a slice plot from a parameter file
@@ -984,6 +964,9 @@
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
+    __plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
@@ -1063,6 +1046,9 @@
         self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
@@ -1110,31 +1096,10 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
-class OffAxisProjectionDummyFRB(FixedResolutionBuffer):
-    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
-                 periodic = False):
-        self.internal_dict = {}
-        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
-
-    def __getitem__(self, item):
-        try:
-            image = self.internal_dict[item]
-        except KeyError:
-            ds = self.data_source
-            image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
-                                        ds.width, ds.resolution, item,
-                                        weight=ds.weight_field, volume=ds.volume,
-                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated)
-            self.internal_dict[item] = image
-        return image
-    
-    def _get_data_source_fields(self):
-        for f in self.data_source.fields:
-            self[f] = None
-
 class OffAxisProjectionDummyDataSource(object):
     _type_name = 'proj'
     proj_style = 'integrate'
+    _key_fields = []
     def __init__(self, center, pf, normal_vector, width, fields, 
                  interpolated, resolution = (800,800), weight=None,  
                  volume=None, no_ghost=False, le=None, re=None, 
@@ -1155,6 +1120,9 @@
         self.north_vector = north_vector
 
 class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 


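This refactor replaces runtime string matching on data-source class names with two declarative class attributes, _plot_type and _frb_generator. A minimal illustration of the dispatch pattern (generic class names here, not yt's):

    class BufferA(object):
        def __init__(self, source, bounds):
            self.kind = 'A'

    class BufferB(object):
        def __init__(self, source, bounds):
            self.kind = 'B'

    class Viewer(object):
        # Subclasses declare their buffer type; the base class no longer
        # inspects __class__.__name__ to decide what to build.
        _frb_generator = BufferA
        _plot_type = 'Generic'

        def _recreate_frb(self, source, bounds):
            return self._frb_generator(source, bounds)

    class ObliqueViewer(Viewer):
        _frb_generator = BufferB
        _plot_type = 'Oblique'

    assert ObliqueViewer()._recreate_frb(None, None).kind == 'B'

Note the double-underscore __plot_type that slips into ProjectionPlot in this commit is corrected to _plot_type in ac4b312b below.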

https://bitbucket.org/yt_analysis/yt-3.0/changeset/c00c65ad909e/
changeset:   c00c65ad909e
branch:      yt
user:        ngoldbaum
date:        2012-10-02 03:05:25
summary:     Fixing set_width to work with OffAxisProjectionPlot.
affected #:  2 files

diff -r b2263e35caa7eeaf80daace4b177229c4647936e -r c00c65ad909e0ad93ca6df617ce83c8475acf29f yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -398,8 +398,11 @@
         mylog.info("Making a fixed resolution buffer of (%s) %d by %d" % \
             (item, self.buff_size[0], self.buff_size[1]))
         ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
         buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
-                                   ds.width, ds.resolution, item,
+                                   width, ds.resolution, item,
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated)
         self[item] = buff


diff -r b2263e35caa7eeaf80daace4b177229c4647936e -r c00c65ad909e0ad93ca6df617ce83c8475acf29f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -261,7 +261,10 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        bounds = self.xlim+self.ylim
+        if self.zlim:
+            bounds = self.xlim+self.ylim+self.zlim
+        else:
+            bounds = self.xlim+self.ylim
         self._frb = self._frb_generator(self.data_source,
                                         bounds, self.buff_size,
                                         self.antialias,
@@ -306,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -352,12 +356,16 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if self.zlim:
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
@@ -403,14 +411,20 @@
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
         
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -1173,7 +1187,6 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        self.OffAxisProjection = True
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
         # Hard-coding the resolution for now
         fields = ensure_list(fields)[:]


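The set_width fix keeps the optional z limits in step with the in-plane limits: the depth is recentred on its old midpoint with the larger of the two new in-plane widths. The arithmetic in isolation (plain numbers, not yt state):

    zlim = (0.2, 0.8)              # current projection depth limits
    width = [0.5, 0.3]             # new in-plane widths in code units

    centerz = (zlim[1] + zlim[0]) / 2.
    mw = max(width)
    zlim = (centerz - mw / 2., centerz + mw / 2.)
    # zlim is now (0.25, 0.75): same midpoint, width max(0.5, 0.3)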

https://bitbucket.org/yt_analysis/yt-3.0/changeset/18e031b3973d/
changeset:   18e031b3973d
branch:      yt
user:        ngoldbaum
date:        2012-10-02 07:34:58
summary:     Fixing a couple of typos.
affected #:  1 file

diff -r de5451397981bca072e120ea53540e571ff56669 -r 18e031b3973db3c99d302b95b7b96e6f340fac73 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1002,7 +1002,7 @@
 def _BMagnitude(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
     units of Gauss. If you use MKS, make sure to write your own
-    MagneticEnergy field to deal with non-unitary \mu_0.
+    BMagnitude field to deal with non-unitary \mu_0.
     """
     return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
 add_field("BMagnitude",
@@ -1012,7 +1012,7 @@
 def _PlasmaBeta(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
     units of Gauss. If you use MKS, make sure to write your own
-    MagneticEnergy field to deal with non-unitary \mu_0.
+    PlasmaBeta field to deal with non-unitary \mu_0.
     """
     return data['Pressure']/data['MagneticEnergy']
 add_field("PlasmaBeta",


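Both corrected docstrings belong to derived fields registered through yt's add_field pattern; for context, a sketch of a field registered in the same style (the field name is illustrative and not part of this commit; add_field is assumed importable from yt.mods as elsewhere in this series):

    import numpy as np
    from yt.mods import add_field

    def _BFieldEnergyDensity(field, data):
        """Assumes Bx, By, Bz are in Gauss, like the fields above."""
        return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2) / (8.0 * np.pi)

    add_field("BFieldEnergyDensity", function=_BFieldEnergyDensity,
              units=r"\rm{erg}/\rm{cm}^{3}")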

https://bitbucket.org/yt_analysis/yt-3.0/changeset/454d7e8a63d6/
changeset:   454d7e8a63d6
branch:      yt
user:        ngoldbaum
date:        2012-10-03 00:37:32
summary:     Merging.
affected #:  1 file

diff -r bbc25a5eaac852d67c4b17d1c65431876555623a -r 454d7e8a63d686f63e7191731abe46286eeb8143 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1002,7 +1002,7 @@
 def _BMagnitude(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
     units of Gauss. If you use MKS, make sure to write your own
-    MagneticEnergy field to deal with non-unitary \mu_0.
+    BMagnitude field to deal with non-unitary \mu_0.
     """
     return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
 add_field("BMagnitude",
@@ -1012,7 +1012,7 @@
 def _PlasmaBeta(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
     units of Gauss. If you use MKS, make sure to write your own
-    MagneticEnergy field to deal with non-unitary \mu_0.
+    PlasmaBeta field to deal with non-unitary \mu_0.
     """
     return data['Pressure']/data['MagneticEnergy']
 add_field("PlasmaBeta",



https://bitbucket.org/yt_analysis/yt-3.0/changeset/0d164fe973b2/
changeset:   0d164fe973b2
branch:      yt
user:        ngoldbaum
date:        2012-10-03 00:37:54
summary:     Merging
affected #:  4 files

diff -r 454d7e8a63d686f63e7191731abe46286eeb8143 -r 0d164fe973b282445477f7f7e0d41c749998f523 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -122,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r 454d7e8a63d686f63e7191731abe46286eeb8143 -r 0d164fe973b282445477f7f7e0d41c749998f523 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r 454d7e8a63d686f63e7191731abe46286eeb8143 -r 0d164fe973b282445477f7f7e0d41c749998f523 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 import _MPL
 import numpy as np
 import weakref
@@ -384,3 +385,27 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolution buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated)
+        self[item] = buff
+        return buff
+
+


diff -r 454d7e8a63d686f63e7191731abe46286eeb8143 -r 0d164fe973b282445477f7f7e0d41c749998f523 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -43,7 +43,8 @@
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
@@ -160,7 +161,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -171,6 +172,13 @@
         width = (width, width)
     Wx, Wy = width
     width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (depth,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -185,9 +193,13 @@
     (normal,perp1,perp2) = ortho_find(normal)
     mat = np.transpose(np.column_stack((perp1,perp2,normal)))
     center = np.dot(mat,center)
+    width = width
+    
+    if width.shape == (2,):
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
 
 class PlotWindow(object):
@@ -249,20 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
+        if self.zlim:
+            bounds = self.xlim+self.ylim+self.zlim
+        else:
             bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
-                                                  periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
-            raise RuntimeError("Failed to repixelize.")
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -303,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -349,12 +356,16 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if self.zlim:
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
@@ -400,14 +411,20 @@
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
         
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -835,13 +852,7 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
-            weight = self.data_source.weight_field
-        if 'Cutting' in self.data_source.__class__.__name__:
-            type = 'OffAxisSlice'
+        type = self._plot_type
         names = []
         for k, v in self.plots.iteritems():
             if axis:
@@ -889,6 +900,9 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window'):
         r"""Creates a slice plot from a parameter file
@@ -964,6 +978,9 @@
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
+    __plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
@@ -1043,6 +1060,9 @@
         self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
@@ -1090,6 +1110,95 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    _key_fields = []
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two- or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of the image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level: int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+        # Hard-coding the resolution for now
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
 _metadata_template = """
 %(pf)s<br><br>



https://bitbucket.org/yt_analysis/yt-3.0/changeset/ac4b312b5d6f/
changeset:   ac4b312b5d6f
branch:      yt
user:        ngoldbaum
date:        2012-10-03 03:34:17
summary:     Fixing some bugs found during testing.
affected #:  4 files

diff -r 0d164fe973b282445477f7f7e0d41c749998f523 -r ac4b312b5d6fac59499a2b8bc5aa6cc3faabd0f2 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1007,7 +1007,7 @@
     return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
 add_field("BMagnitude",
           function=_BMagnitude,
-          display_name=r"|B|", units="\rm{Gauss}")
+          display_name=r"|B|", units=r"\rm{Gauss}")
 
 def _PlasmaBeta(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


diff -r 0d164fe973b282445477f7f7e0d41c749998f523 -r ac4b312b5d6fac59499a2b8bc5aa6cc3faabd0f2 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -404,8 +404,9 @@
         buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
                                    width, ds.resolution, item,
                                    weight=ds.weight_field, volume=ds.volume,
-                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated)
-        self[item] = buff
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        self[item] = buff.swapaxes(0,1)
         return buff
 
 


diff -r 0d164fe973b282445477f7f7e0d41c749998f523 -r ac4b312b5d6fac59499a2b8bc5aa6cc3faabd0f2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -187,15 +187,15 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = np.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
-    center = np.dot(mat,center)
-    width = width
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
+        width = width
     
-    if width.shape == (2,):
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     else:
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
@@ -261,7 +261,7 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        if self.zlim:
+        if hasattr(self,'zlim'):
             bounds = self.xlim+self.ylim+self.zlim
         else:
             bounds = self.xlim+self.ylim
@@ -363,7 +363,7 @@
                 self.zlim = tuple(bounds[4:6])
         mylog.info("xlim = %f %f" %self.xlim)
         mylog.info("ylim = %f %f" %self.ylim)
-        if self.zlim:
+        if hasattr(self,'zlim'):
             mylog.info("zlim = %f %f" %self.zlim)
 
     @invalidate_data
@@ -853,6 +853,8 @@
         axis = axis_names[self.data_source.axis]
         weight = None
         type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
+            weight = self.data_source.weight_field
         names = []
         for k, v in self.plots.iteritems():
             if axis:
@@ -978,7 +980,7 @@
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    __plot_type = 'Projection'
+    _plot_type = 'Projection'
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,


diff -r 0d164fe973b282445477f7f7e0d41c749998f523 -r ac4b312b5d6fac59499a2b8bc5aa6cc3faabd0f2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1681,7 +1681,8 @@
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1740,8 +1741,9 @@
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated, 
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")


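With north_vector now threaded through off_axis_projection, callers can pin the image orientation directly. A hedged usage sketch (the dataset path is hypothetical; the signature matches this changeset):

    from yt.mods import load
    from yt.visualization.volume_rendering.api import off_axis_projection

    pf = load("RedshiftOutput0005")  # hypothetical dataset path
    center = (pf.domain_left_edge + pf.domain_right_edge) / 2.0
    # Fix 'up' to +z so a sequence of frames does not roll as the
    # normal vector changes between calls.
    image = off_axis_projection(pf, center, [1.0, 1.0, 0.0],
                                1.0, (512, 512), "Density",
                                north_vector=[0.0, 0.0, 1.0])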

https://bitbucket.org/yt_analysis/yt-3.0/changeset/59154579b465/
changeset:   59154579b465
branch:      yt
user:        MatthewTurk
date:        2012-10-03 05:42:24
summary:     Fixing a void * pointer and changing expl => exp inside the star rendering.
affected #:  1 file

diff -r ac4b312b5d6fac59499a2b8bc5aa6cc3faabd0f2 -r 59154579b465a3bc1d01723d7b8680526d4b784d yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -656,7 +656,7 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
             for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]


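The expl => exp change affects the per-star Gaussian splat accumulated along each ray; expl is the C long-double variant and was the wrong match for the float64 math here. In NumPy terms the kernel evaluates roughly as follows (coefficient and sigma values illustrative):

    import numpy as np

    def star_gaussian(sample_pos, star_pos, star_coeff, star_sigma_num):
        # Squared distance from the ray sample point to the star, then
        # the same exponential falloff the Cython kernel computes.
        gexp = np.sum((np.asarray(sample_pos) - np.asarray(star_pos))**2)
        return star_coeff * np.exp(-gexp / star_sigma_num)

    print(star_gaussian([0.1, 0.2, 0.3], [0.1, 0.2, 0.35], 1.0, 0.01))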

https://bitbucket.org/yt_analysis/yt-3.0/changeset/24087b9826d1/
changeset:   24087b9826d1
branch:      yt-3.0
user:        MatthewTurk
date:        2012-10-04 02:12:50
summary:     Merging changes in yt dev branch since 7c5ad85490e8
affected #:  92 files

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.

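The styleguide hunk above recommends in-place NumPy operations to avoid temporary copies; a quick demonstration, independent of yt:

    import numpy as np

    a = np.arange(12, dtype='float64')
    a.shape = (3, 4)        # reshapes without copying, per the guide
    np.multiply(a, 3, a)    # in-place multiply: no temporary allocated
    b = a * 3               # by contrast, allocates a fresh array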

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,7 +399,6 @@
 # Now we dump all our SHA512 files out.
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
@@ -684,6 +683,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"






diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
             child_points = np.array(child_points)
-            fKD.pos = np.asfortranarray(child_points.T)
-            fKD.qv = np.empty(3, dtype='float64')
-            fKD.dist = np.empty(NumNeighbors, dtype='float64')
-            fKD.tags = np.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = np.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = np.array([], dtype='int64')
-            parent_masses = np.array([], dtype='float64')
-            parent_halos = np.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = np.concatenate((parent_IDs, thisIDs))
-                        parent_masses = np.concatenate((parent_masses, thisMasses))
-                        parent_halos = np.concatenate((parent_halos, 
-                            np.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(np.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
+            else:
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
         parent_send = np.ones(parent_IDs.size, dtype='bool')
-        
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = np.array([], dtype='int64')
-        child_masses = np.array([], dtype='float64')
-        child_halos = np.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = np.concatenate((child_IDs, thisIDs))
-                    child_masses = np.concatenate((child_masses, thisMasses))
-                    child_halos = np.concatenate((child_halos, 
-                        np.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(np.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
+        else:
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
         child_send = np.ones(child_IDs.size, dtype='bool')
-        del sort
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
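
The fKD-to-cKDTree switch above trades global module state for a plain object.
A minimal sketch of the new pattern, using yt's forked cKDTree (the period
keyword is specific to yt.utilities.spatial, not stock scipy); positions invented:

    import numpy as np
    from yt.utilities.spatial import cKDTree

    points = np.random.random((100, 3))        # normalized to [0, 1)
    kdtree = cKDTree(points, leafsize = 10)
    # Periodic query: distances and indices of the 5 nearest neighbors,
    # wrapping at the unit box.
    dist, tags = kdtree.query(np.array([0.5, 0.5, 0.5]), 5,
                              period = np.array([1.0, 1.0, 1.0]))
    del kdtree                                 # no free_tree() bookkeeping needed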








diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
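
The one-line guard stops pass_down from iterating over a leaf clump whose
children attribute is still None rather than an empty list.  A toy illustration
(Clump here is a bare stand-in, not the real class):

    class Clump(object):
        def __init__(self, children = None):
            self.children = children      # leaf clumps keep None, not []
        def pass_down(self, operation):
            exec(operation)
            if self.children is None: return
            for child in self.children:
                child.pass_down(operation)

    root = Clump(children = [Clump()])
    root.pass_down("pass")                # recurses into the leaf without a TypeError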
 




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -51,6 +51,7 @@
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
     pasteboard_repo = '',
+    reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
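
Like the other defaults above, reconstruct_hierarchy is stored as a string and
read back through the config accessors.  A hedged usage sketch (ytcfg follows
the ConfigParser API):

    from yt.config import ytcfg

    if ytcfg.getboolean("yt", "reconstruct_hierarchy"):
        # Rebuild the hierarchy from the raw data instead of trusting
        # any cached representation.
        pass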








diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -233,7 +233,7 @@
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'height_vector']:
+        if param in ['bulk_velocity', 'center', 'normal']:
             return np.random.random(3) * 1e-2
         else:
             return 0.0


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -214,8 +214,8 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
@@ -376,6 +376,8 @@
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
         rf = self.pf.refine_by
+        dlevel = child.Level - self.Level # dlevel was previously undefined here
+        if dlevel != 1:
+            rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = np.maximum(0, cgi / rf - gi)
         endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
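
Using the domain width (rather than 1.0) for untracked dimensions keeps cell
volumes honest for 1D/2D data.  A small worked check with invented numbers:

    import numpy as np

    domain_left_edge  = np.array([0.0, 0.0, 0.0])
    domain_right_edge = np.array([1.0, 1.0, 0.1])   # thin 2D-ish domain
    ActiveDimensions  = np.array([64, 64, 1])
    dds = (domain_right_edge - domain_left_edge) / ActiveDimensions
    # The old code forced dds[2] = 1.0 when dimensionality < 3; keeping
    # the true width of 0.1 makes dds.prod() the actual cell volume.
    print dds.prod()    # 0.1 / 4096, not 1.0 / 4096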






diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -702,7 +702,7 @@
         """
         YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
         self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
-        self.set_field_parameter("height_vector", self._norm_vec)
+        self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
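
Data objects that carry a normal now seed the renamed parameter themselves, so
the coordinate fields introduced just below work out of the box.  A hedged
sketch (dataset name, center, normal, radius, and height all invented):

    from yt.mods import load

    pf = load("DD0010/DD0010")
    disk = pf.h.disk([0.5, 0.5, 0.5],    # center
                     [0.0, 0.0, 1.0],    # normal
                     0.2, 0.05)          # radius, height in code units
    # __init__ has already called set_field_parameter("normal", ...),
    # so disk["cyl_R"] and disk["sph_theta"] resolve without extra setup.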


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -212,50 +212,181 @@
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
+
+
+### spherical coordinates: r (radius)
+def _sph_r(field, data):
+    center = data.get_field_parameter("center")
+
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The spherical coordinates radius is simply the magnitude of the
+    ## coords vector.
+
+    return np.sqrt(np.sum(coords**2,axis=-1))
+
+def _Convert_sph_r_CGS(data):
+   return data.convert("cm")
+
+add_field("sph_r", function=_sph_r,
+         validators=[ValidateParameter("center")],
+         convert_function = _Convert_sph_r_CGS, units=r"\rm{cm}")
+
+
+### spherical coordinates: theta (angle with respect to normal)
+def _sph_theta(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The angle (theta) with respect to the normal (J), is the arccos
+    ## of the dot product of the normal with the normalized coords
+    ## vector.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=-1)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+
+add_field("sph_theta", function=_sph_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### spherical coordinates: phi (angle in the plane perpendicular to the normal)
+def _sph_phi(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+    
+    ## We have freedom with respect to what axis (xprime) to define
+    ## the disk angle. Here I've chosen to use the axis that is
+    ## perpendicular to the normal and the y-axis. When normal ==
+    ## y-hat, then set xprime = z-hat. With this definition, when
+    ## normal == z-hat (as is typical), then xprime == x-hat.
+    ##
+    ## The angle is then given by the arctan of the ratio of the
+    ## yprime-component and the xprime-component of the coords vector.
+
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    # Test the squared norm: np.sum(xprime) can vanish for non-degenerate normals.
+    if np.sum(xprime**2) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
+    
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
+    
+    return np.arctan2(Py,Px)
+
+add_field("sph_phi", function=_sph_phi,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+
+### cylindrical coordinates: R (radius in the cylinder's plane)
+def _cyl_R(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+      
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The cross product of the normal (J) with the coords vector
+    ## gives a vector of magnitude equal to the cylindrical radius.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+
+def _Convert_cyl_R_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_R", function=_cyl_R,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: z (height above the cylinder's plane)
+def _cyl_z(field, data):
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = np.array([data['x'] - center[0],
+                       data['y'] - center[1],
+                       data['z'] - center[2]]).transpose()
+
+    ## The dot product of the normal (J) with the coords vector gives
+    ## the cylindrical height.
+    
+    tile_shape = list(coords.shape)[:-1] + [1]
+    J = np.tile(normal,tile_shape)
+
+    return np.sum(J*coords,axis=-1)  
+
+def _Convert_cyl_z_CGS(data):
+   return data.convert("cm")
+
+add_field("cyl_z", function=_cyl_z,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")],
+         convert_function = _Convert_cyl_z_CGS, units=r"\rm{cm}")
+
+
+### cylindrical coordinates: theta (angle in the cylinder's plane)
+### [This is identical to the spherical coordinate's 'phi' angle.]
+def _cyl_theta(field, data):
+    return data['sph_phi']
+
+add_field("cyl_theta", function=_cyl_theta,
+         validators=[ValidateParameter("center"),ValidateParameter("normal")])
+
+
+### The old field DiskAngle is the same as the spherical coordinates'
+### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
+def _DiskAngle(field, data):
+    return data['sph_theta']
+
+add_field("DiskAngle", function=_DiskAngle,
+          take_log=False,
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
+          display_field=False)
+
+
+### The old field Height is the same as the cylindrical coordinates' z
+### field. I'm keeping Height for backwards compatibility.
 def _Height(field, data):
-    # We take the dot product of the radius vector with the height-vector
-    center = data.get_field_parameter("center")
-    r_vec = np.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    h_vec = np.array(data.get_field_parameter("height_vector"))
-    h_vec = h_vec / np.sqrt(h_vec[0]**2.0+
-                            h_vec[1]**2.0+
-                            h_vec[2]**2.0)
-    height = r_vec[0,:] * h_vec[0] \
-           + r_vec[1,:] * h_vec[1] \
-           + r_vec[2,:] * h_vec[2]
-    return np.abs(height)
+    return data['cyl_z']
+
 def _convertHeight(data):
     return data.convert("cm")
 def _convertHeightAU(data):
     return data.convert("au")
 add_field("Height", function=_Height,
           convert_function=_convertHeight,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"cm", display_field=False)
 add_field("HeightAU", function=_Height,
           convert_function=_convertHeightAU,
-          validators=[ValidateParameter("height_vector")],
+          validators=[ValidateParameter("center"),
+                      ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
-def _DiskAngle(field, data):
-    # We make both r_vec and h_vec into unit vectors
-    center = data.get_field_parameter("center")
-    r_vec = np.array([data["x"] - center[0],
-                      data["y"] - center[1],
-                      data["z"] - center[2]])
-    r_vec = r_vec/np.sqrt((r_vec**2.0).sum(axis=0))
-    h_vec = np.array(data.get_field_parameter("height_vector"))
-    dp = r_vec[0,:] * h_vec[0] \
-       + r_vec[1,:] * h_vec[1] \
-       + r_vec[2,:] * h_vec[2]
-    return np.arccos(dp)
-add_field("DiskAngle", function=_DiskAngle,
-          take_log=False,
-          validators=[ValidateParameter("height_vector"),
-                      ValidateParameter("center")],
-          display_field=False)
 
 def _DynamicalTime(field, data):
     """
@@ -802,6 +933,25 @@
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -839,12 +989,37 @@
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    BMagnitude field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    PlasmaBeta field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
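
All of the new sph_*/cyl_* fields pull "center" and "normal" from field
parameters, so any data object that sets both can evaluate them.  A hedged
usage sketch (dataset name and values invented):

    import numpy as np
    from yt.mods import load

    pf = load("DD0010/DD0010")
    dd = pf.h.all_data()
    dd.set_field_parameter("center", np.array([0.5, 0.5, 0.5]))
    dd.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    # With normal == z-hat, sph_phi reduces to arctan2(y - yc, x - xc)
    # and cyl_z to z - zc, matching the definitions above.
    print dd["sph_r"].max(), dd["cyl_R"].min()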


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+
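
The comments above spell out exactly what a concrete frontend must fill in.  A
minimal single-grid sketch of _parse_hierarchy (shapes follow the comments;
the 64^3 unit-box values are invented):

    import numpy as np

    def _parse_hierarchy(self):
        self.grid_left_edge  = np.zeros((1, 3), dtype='float64')
        self.grid_right_edge = np.ones((1, 3), dtype='float64')
        self.grid_dimensions = np.array([[64, 64, 64]], dtype='int32')
        self.grid_particle_count = np.zeros((1, 1), dtype='int32')
        self.grid_levels = np.zeros((1, 1), dtype='int32')
        self.grids = np.empty(1, dtype='object')
        self.grids[0] = self.grid(0, self, 0)   # id, hierarchy, level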




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field.  _get_convert (the same helper
+# the Athena frontend defines) builds the conversion closure:
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")
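
The translation loop above registers each on-disk name twice: verbatim in
KnownSkeletonFields and under the yt-facing alias via TranslationFunc.  For
the ("Density", "dens") pair, one pass through the loop amounts to:

    add_skeleton_field("dens", function=NullFunc, take_log=False,
                       validators = [ValidateDataField("dens")],
                       particle_type = False)
    add_field("Density", TranslationFunc("dens"),
              take_log=KnownSkeletonFields["dens"].take_log,
              units = KnownSkeletonFields["dens"]._units,
              display_name="Density", particle_type = False)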


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config








diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/api.py
--- /dev/null
+++ b/yt/frontends/athena/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.athena
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      AthenaGrid, \
+      AthenaHierarchy, \
+      AthenaStaticOutput
+
+from .fields import \
+      AthenaFieldInfo, \
+      KnownAthenaFields, \
+      add_athena_field
+
+from .io import \
+      IOHandlerAthena


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena/data_structures.py
@@ -0,0 +1,356 @@
+"""
+Data structures for Athena.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as np
+import weakref
+from collections import defaultdict
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridGeometryHandler
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import AthenaFieldInfo, KnownAthenaFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class AthenaGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        df = hierarchy.storage_filename
+        if 'id0' not in hierarchy.parameter_file.filename:
+            gname = hierarchy.parameter_file.filename
+        else:
+            if id == 0:
+                gname = 'id0/%s.vtk' % df
+            else:
+                gname = 'id%i/%s-id%i%s.vtk' % (id, df[:-5], id, df[-5:] )
+        AMRGridPatch.__init__(self, id, filename = gname,
+                              hierarchy = hierarchy)
+        self.filename = gname
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+def parse_line(line, grid):
+    # grid is a dictionary
+    splitup = line.strip().split()
+    if "vtk" in splitup:
+        grid['vtk_version'] = splitup[-1]
+    elif "Really" in splitup:
+        grid['time'] = splitup[-1]
+    elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
+        grid['time'] = float(splitup[4].rstrip(','))
+        grid['level'] = int(splitup[6].rstrip(','))
+        grid['domain'] = int(splitup[8].rstrip(','))
+    elif "DIMENSIONS" in splitup:
+        grid['dimensions'] = np.array(splitup[-3:]).astype('int')
+    elif "ORIGIN" in splitup:
+        grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
+    elif "SPACING" in splitup:
+        grid['dds'] = np.array(splitup[-3:]).astype('float64')
+    elif "CELL_DATA" in splitup:
+        grid["ncells"] = int(splitup[-1])
+    elif "SCALARS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif "VECTORS" in splitup:
+        field = splitup[1]
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+
+class AthenaHierarchy(GridGeometryHandler):
+
+    grid = AthenaGrid
+    _data_style='athena'
+    
+    def __init__(self, pf, data_style='athena'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.storage_filename = self.parameter_file.storage_filename
+        self.hierarchy_filename = self.parameter_file.filename
+        #self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = file(self.hierarchy_filename,'rb')
+        GridGeometryHandler.__init__(self, pf, data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        field_map = {}
+        f = open(self.hierarchy_filename,'rb')
+        line = f.readline()
+        while line != '':
+            splitup = line.strip().split()
+            if "DIMENSIONS" in splitup:
+                grid_dims = np.array(splitup[-3:]).astype('int')
+                line = f.readline()
+            elif "CELL_DATA" in splitup:
+                grid_ncells = int(splitup[-1])
+                line = f.readline()
+                if np.prod(grid_dims) != grid_ncells:
+                    grid_dims -= 1
+                    grid_dims[grid_dims==0]=1
+                if np.prod(grid_dims) != grid_ncells:
+                    mylog.error('product of dimensions %i not equal to number of cells %i' %
+                          (np.prod(grid_dims), grid_ncells))
+                    raise TypeError
+                break
+            else:
+                line = f.readline()
+        read_table = False
+        read_table_offset = f.tell()
+        while line != '':
+            splitup = line.strip().split()
+            if 'SCALARS' in splitup:
+                field = splitup[1]
+                if not read_table:
+                    line = f.readline() # Read the lookup table line
+                    read_table = True
+                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                read_table=False
+
+            elif 'VECTORS' in splitup:
+                field = splitup[1]
+                for ax in 'xyz':
+                    field_map["%s_%s" % (field, ax)] =\
+                            ('vector', f.tell() - read_table_offset)
+            line = f.readline()
+
+        f.close()
+
+        self.field_list = field_map.keys()
+        self._field_map = field_map
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = self.parameter_file.nvtk
+
+    def _parse_hierarchy(self):
+        f = open(self.hierarchy_filename,'rb')
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = f.readline()
+        f.close()
+
+        # It seems some datasets have a mismatch between ncells and 
+        # the actual grid dimensions.
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            grid['dimensions'] -= 1
+            grid['dimensions'][grid['dimensions']==0]=1
+        if np.prod(grid['dimensions']) != grid['ncells']:
+            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                  (np.prod(grid['dimensions']), grid['ncells']))
+            raise TypeError
+
+        dxs=[]
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.zeros(self.num_grids, dtype='int32')
+        single_grid_width = grid['dds']*grid['dimensions']
+        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
+        glis = np.empty((self.num_grids,3), dtype='int64')
+        for i in range(self.num_grids):
+            procz = i/(grids_per_dim[0]*grids_per_dim[1])
+            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
+            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
+            glis[i, 0] = procx*grid['dimensions'][0]
+            glis[i, 1] = procy*grid['dimensions'][1]
+            glis[i, 2] = procz*grid['dimensions'][2]
+        gdims = np.ones_like(glis)
+        gdims[:] = grid['dimensions']
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx = dx/self.parameter_file.refine_by**(levels[i])
+            dxs.append(grid['dds'])
+        dx = np.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+#     def _setup_derived_fields(self):
+#         self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+class AthenaStaticOutput(StaticOutput):
+    _hierarchy_class = AthenaHierarchy
+    _fieldinfo_fallback = AthenaFieldInfo
+    _fieldinfo_known = KnownAthenaFields
+    _data_style = "athena"
+
+    def __init__(self, filename, data_style='athena',
+                 storage_filename = None, parameters = {}):
+        self.specified_parameters = parameters
+        StaticOutput.__init__(self, filename, data_style)
+        self.filename = filename
+        self.storage_filename = filename[4:-4]
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+
+    def _setup_nounits_units(self):
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _parse_parameter_file(self):
+        self._handle = open(self.parameter_filename, "rb")
+        # Read the start of a grid to get simulation parameters.
+        grid = {}
+        grid['read_field'] = None
+        line = self._handle.readline()
+        while grid['read_field'] is None:
+            parse_line(line, grid)
+            if "SCALAR" in line.strip().split():
+                break
+            if "VECTOR" in line.strip().split():
+                break
+            if 'TABLE' in line.strip().split():
+                break
+            if len(line) == 0: break
+            line = self._handle.readline()
+
+        self.domain_left_edge = grid['left_edge']
+        if 'domain_right_edge' in self.specified_parameters:
+            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
+        else:
+            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
+                    "if it is not equal to -domain_left_edge.")
+            self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self.domain_width/grid['dds']
+        refine_by = None
+        if refine_by is None: refine_by = 2
+        self.refine_by = refine_by
+        self.dimensionality = 3
+        self.current_time = grid["time"]
+        self.unique_identifier = self._handle.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+
+        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'vtk' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+
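
Only the left edge is recoverable from the VTK header, hence the parameters
hook for the right edge.  A hedged loading sketch (file name invented; without
the hint the frontend assumes the domain is symmetric about the origin):

    import numpy as np
    from yt.frontends.athena.api import AthenaStaticOutput

    pf = AthenaStaticOutput("id0/Blast.0010.vtk",
            parameters = {"domain_right_edge": np.array([0.5, 0.5, 0.5])})
    pf.h.print_stats()    # the hierarchy walks the per-processor id*/ vtk files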


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/definitions.py
--- /dev/null
+++ b/yt/frontends/athena/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/fields.py
--- /dev/null
+++ b/yt/frontends/athena/fields.py
@@ -0,0 +1,88 @@
+"""
+Athena-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Samuel W. Skillman, Matthew Turk, J. S. Oishi.  
+  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {}
+
+translation_dict = {"Density": "density",
+                    "Pressure": "pressure",
+                    "x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z",
+                    "mag_field_x": "cell_centered_B_x ",
+                    "mag_field_y": "cell_centered_B_y ",
+                    "mag_field_z": "cell_centered_B_z "}
+
+AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = AthenaFieldInfo.add_field
+
+KnownAthenaFields = FieldInfoContainer()
+add_athena_field = KnownAthenaFields.add_field
+
+add_athena_field("density", function=NullFunc, take_log=False,
+          units=r"",
+          projected_units =r"")
+
+add_athena_field("pressure", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
+          units=r"")
+
+add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
+          units=r"")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+
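
With the translations in place, the universal derived fields operate on Athena
data unchanged.  Registering one alias by hand looks like a single unrolled
pass of the loop above:

    add_field("mag_field_x", TranslationFunc("cell_centered_B_x"),
              take_log=False)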


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/io.py
--- /dev/null
+++ b/yt/frontends/athena/io.py
@@ -0,0 +1,107 @@
+"""
+The data-file handling functions
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.utilities.io_handler import \
+           BaseIOHandler
+import numpy as np
+
+class IOHandlerAthena(BaseIOHandler):
+    _data_style = "athena"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+    _read_table_offset = None
+
+    def _field_dict(self,fhandle):
+        # Map each on-disk field name to itself; only the names matter here.
+        keys = fhandle['field_types'].keys()
+        return dict(zip(keys, keys))
+
+    def _read_field_names(self,grid):
+        pass
+
+    def _read_data_set(self,grid,field):
+        f = open(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+        grid_dims = grid.ActiveDimensions
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4',
+                    count=grid_ncells).reshape(grid_dims,order='F').copy()
+        elif dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid_dims,order='F').copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid_dims,order='F').copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid_dims,order='F').copy()
+        f.close()
+        if grid.pf.field_ordering == 1:
+            return data.T
+        else:
+            return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        if grid.pf.field_ordering == 1:
+            sl.reverse()
+
+        f = open(grid.filename, 'rb')
+        dtype, offset = grid.hierarchy._field_map[field]
+        grid_ncells = np.prod(grid.ActiveDimensions)
+
+        read_table_offset = get_read_table_offset(f)
+        f.seek(read_table_offset+offset)
+        if dtype == 'scalar':
+            data = np.fromfile(f, dtype='>f4', 
+                    count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        elif dtype == 'vector':
+            data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+            if '_x' in field:
+                data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_y' in field:
+                data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+            elif '_z' in field:
+                data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
+        f.close()
+        return data
+
+def get_read_table_offset(f):
+    line = f.readline()
+    while True:
+        splitup = line.strip().split()
+        if 'CELL_DATA' in splitup:
+            f.readline()
+            read_table_offset = f.tell()
+            break
+        line = f.readline()
+        if not line:  # EOF without finding the marker
+            raise IOError("CELL_DATA marker not found in %s" % f.name)
+    return read_table_offset
+
+


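The vector branch above de-interleaves (x, y, z) triples via numpy
striding; a self-contained sketch of the same pattern on fabricated
data (a hypothetical 2x2x2 grid of big-endian float32, as in the
Athena files):

    import numpy as np

    dims = (2, 2, 2)
    raw = np.arange(3 * 8, dtype='>f4')      # three components per cell
    bx = raw[0::3].reshape(dims, order='F')  # every third value is x
    by = raw[1::3].reshape(dims, order='F')
    bz = raw[2::3].reshape(dims, order='F')
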


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/athena/setup.py
--- /dev/null
+++ b/yt/frontends/athena/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('athena', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz were already read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # per-level dds was already computed and stored in the hierarchy's dds_list
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(GridGeometryHandler):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +

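A minimal usage sketch of the aliases above, assuming a hypothetical
Chombo output at "plt0000":

    from yt.mods import load

    pf = load("plt0000")
    dd = pf.h.all_data()
    bx = dd["Bx"]  # same data as dd["X-magnfield"], labeled in Gauss
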

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -456,13 +456,15 @@
         mylog.info("Finished rebuilding")
 
     def _populate_grid_objects(self):
+        reconstruct = ytcfg.getboolean("yt","reconstruct_hierarchy")
         for g,f in izip(self.grids, self.filenames):
             g._prepare_grid()
             g.NumberOfActiveParticles = \
                 self.grid_active_particle_count[g.id - g._id_offset,0]
             g._setup_dx()
             g.set_filename(f[0])
-            #if g.Parent is not None: g._guess_properties_from_parent()
+            if reconstruct:
+                if g.Parent is not None: g._guess_properties_from_parent()
         del self.filenames # No longer needed.
         self.max_level = self.grid_levels.max()
 
@@ -743,23 +745,6 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
@@ -919,6 +904,11 @@
         for ptype in self.parameters.get("AppendActiveParticleType", []):
             self.particle_types.append(ptype)
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file

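A hedged sketch of opting back in to property reconstruction via the
configuration option referenced above (standard ConfigParser interface
assumed):

    from yt.config import ytcfg

    ytcfg.set("yt", "reconstruct_hierarchy", "True")
    # _populate_grid_objects will now call _guess_properties_from_parent
    # for every grid that has a Parent.
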

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+
+def _TotalEnergy(field, data):
+    return data["Total_Energy"] / _convertEnergy(data)
+add_field("TotalEnergy", function=_TotalEnergy,
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -481,7 +487,7 @@
     """
     return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 
@@ -604,7 +610,6 @@
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -42,11 +42,11 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-
+from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields, \
     CylindricalFLASHFieldInfo, PolarFLASHFieldInfo
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -144,17 +144,20 @@
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = np.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
@@ -192,11 +195,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
@@ -218,6 +226,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
@@ -244,13 +253,13 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         if "EOSType" not in self.parameters:
             self.parameters["EOSType"] = -1
-        if self.cosmological_simulation == 1:
-            self._setup_comoving_units()
         if "pc_unitsbase" in self.parameters:
             if self.parameters["pc_unitsbase"] == "CGS":
                 self._setup_cgs_units()
         else:
             self._setup_nounits_units()
+        if self.cosmological_simulation == 1:
+            self._setup_comoving_units()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / \
@@ -267,10 +276,10 @@
         self.conversion_factors['eint'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['ener'] = (1.0 + self.current_redshift)**-2.0
         self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
-        self.conversion_factors['velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['vely'] = self.conversion_factors['velx']
         self.conversion_factors['velz'] = self.conversion_factors['velx']
-        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)
+        self.conversion_factors['particle_velx'] = (1.0 + self.current_redshift)**-1.0
         self.conversion_factors['particle_vely'] = \
             self.conversion_factors['particle_velx']
         self.conversion_factors['particle_velz'] = \
@@ -280,7 +289,8 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+            self.units[unit] /= (1.0+self.current_redshift)
+            
     def _setup_cgs_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -433,6 +443,7 @@
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
+            self.hubble_constant *= cm_per_mpc * 1.0e-5 * 1.0e-2 # 1/s -> 'h' (units of 100 km/s/Mpc)
         except:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
@@ -474,3 +485,5 @@
         except:
             pass
         return False
+
+

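A hedged arithmetic check of the 'h' conversion above, assuming the
FLASH 'hubbleconstant' parameter is stored in 1/s: cm_per_mpc yields
cm/s/Mpc, 1.0e-5 converts cm/s to km/s, and 1.0e-2 divides by the
100 km/s/Mpc that defines the dimensionless h.

    cm_per_mpc = 3.0857e24   # approximate; yt defines the exact value
    H = 2.27e-18             # roughly 70 km/s/Mpc, expressed in 1/s
    h = H * cm_per_mpc * 1.0e-5 * 1.0e-2
    # h is approximately 0.70
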

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -316,6 +316,46 @@
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
 
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
+
 def _unknown_coord(field, data):
     raise YTCoordinateNotImplemented
 add_cyl_field("dx", function=_unknown_coord)

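Evaluated numerically, the three rescaling factors defined above come
out to:

    import numpy as np

    print(np.sqrt(4*np.pi/1e7))  # "si"   -> ~1.1210e-3
    print(np.sqrt(4*np.pi))      # "none" -> ~3.5449
    # "cgs" -> 1
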

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -52,23 +52,6 @@
             count_list, conv_factors):
         pass
 
-    def _select_particles(self, grid, field):
-        f = self._handle
-        npart = f["/tracer particles"].shape[0]
-        total_selected = 0
-        start = 0
-        stride = 1e6
-        blki = self._particle_fields["particle_blk"]
-        bi = grid.id - grid._id_offset
-        fi = self._particle_fields[field]
-        tr = []
-        while start < npart:
-            end = min(start + stride - 1, npart)
-            gi = f["/tracer particles"][start:end,blki] == bi
-            tr.append(f["/tracer particles"][gi,fi])
-            start = end
-        return np.concatenate(tr)
-
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:






diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -37,6 +37,8 @@
            GridGeometryHandler
 from yt.data_objects.static_output import \
            StaticOutput
+from yt.utilities.lib import \
+    get_box_grids_level
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -133,14 +135,25 @@
         del levels, glis, gdims
 
     def _populate_grid_objects(self):
-        for g in self.grids:
+        mask = np.empty(self.grids.size, dtype='int32')
+        for gi, g in enumerate(self.grids):
             g._prepare_grid()
             g._setup_dx()
 
-        for g in self.grids:
+        for gi, g in enumerate(self.grids):
             g.Children = self._get_grid_children(g)
             for g1 in g.Children:
                 g1.Parent.append(g)
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                self.grid_levels[gi],
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            m = mask.astype("bool")
+            m[gi] = False
+            siblings = self.grids[gi:][m[gi:]]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz were already read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz were already read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # dx, dy, dz were already read in and stored in the hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):






diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('frontends', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("athena")
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -296,8 +297,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +314,66 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    nprocs : integer, optional
+        If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = np.random.random((256, 256, 256))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = np.array(domain_dimensions)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = np.array(domain_dimensions)
-    if np.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = np.zeros(3, 'float64')
-    domain_right_edge = np.ones(3, 'float64')
-    grid_left_edges = np.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = np.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = np.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(np.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3)
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        np.array([-1], dtype='int64'),
-        number_of_particles*np.ones(1, dtype='int64').reshape((1,1)),
-        np.zeros(1).reshape((1,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +387,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf








diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/gui/reason/html/app/controller/Notebook.js
--- a/yt/gui/reason/html/app/controller/Notebook.js
+++ b/yt/gui/reason/html/app/controller/Notebook.js
@@ -73,9 +73,11 @@
     },
 
     addRequest: function(request_id, command) {
+        /*console.log("Adding request " + request_id);*/
         this.getRequestsStore().add({
             request_id: request_id, command: command,
         });
+        reason.pending.update([this.getRequestsStore().count()]);
     },
 
     addCell: function(cell) {
@@ -85,6 +87,7 @@
             var ind = this.getRequestsStore().find(
                 'request_id', cell['result_id']);
             if (ind != -1) {
+                /*console.log("Removing request " + cell['result_id']);*/
                 var rec = this.getRequestsStore().removeAt(ind);
             }
             reason.pending.update([this.getRequestsStore().count()]);




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -96,6 +96,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.athena.api import \
+    AthenaStaticOutput, AthenaFieldInfo, add_athena_field
+
 #from yt.frontends.art.api import \
 #    ARTStaticOutput, ARTFieldInfo, add_art_field
 
@@ -119,7 +122,7 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \

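With the export above in place, the new plot class is importable
alongside the existing ones:

    from yt.mods import OffAxisProjectionPlot
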

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -23,7 +23,8 @@
 """
 
 import numpy as np
-
+from yt.funcs import *
+from numpy.testing import assert_array_equal
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -125,3 +126,18 @@
         right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
 
     return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0)
+    return ug

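A minimal usage sketch of the new testing helper; with negative=True
the field values span roughly (-0.5, 0.5) * peak_value:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(16, fields=("Density",), negative=True)
    dd = pf.h.all_data()  # the random Density field is now accessible
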

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -998,11 +998,11 @@
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
-        pbar = get_pbar("Building kd-Tree",
-                np.prod(self.domain_right_edge-self.domain_left_edge))
+        total_vol = np.prod(self.domain_right_edge-self.domain_left_edge)
+        pbar = get_pbar("Building kd-Tree", total_vol)
 
         while current_node is not None:
-            pbar.update(volume_partitioned)
+            pbar.update(min(volume_partitioned, total_vol))
 
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,
@@ -1188,6 +1188,40 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTNotebookUploadCmd(YTCommand):
+    args = (dict(short="file", type=str),)
+    description = \
+        """
+        Upload an IPython notebook to hub.yt-project.org.
+        """
+
+    name = "upload_notebook"
+    def __call__(self, args):
+        filename = args.file
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        if not filename.endswith(".ipynb"):
+            print "File must be an IPython notebook!"
+            return 1
+        import json
+        try:
+            t = json.loads(open(filename).read())['metadata']['name']
+        except (ValueError, KeyError):
+            print "File does not appear to be an IPython notebook."
+        from yt.utilities.minimal_representation import MinimalNotebook
+        mn = MinimalNotebook(filename, t)
+        rv = mn.upload()
+        print "Upload successful!"
+        print
+        print "To access your raw notebook go here:"
+        print
+        print "  %s" % (rv['url'])
+        print
+        print "To view your notebook go here:"
+        print
+        print "  %s" % (rv['url'].replace("/go/", "/nb/"))
+        print
+
 class YTPlotCmd(YTCommand):
     args = ("width", "unit", "bn", "proj", "center",
             "zlim", "axis", "field", "weight", "skip",

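Once installed, the subcommand is invoked from the shell as
"yt upload_notebook notebook.ipynb" (hypothetical filename); note that
upload() in minimal_representation.py now raises YTHubRegisterError
when no hub_api_key is configured, so registration must happen first.
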

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,156 @@
+"""
+Automagical cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+# Recursively sieve: keep the first element (a prime), drop its multiples.
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    quality = load_balance / (1 + 0.25 * (bsize / ideal_bsize - 1.0))
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consiting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge
+        to minimize the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best, p_size = 0.0, np.ones(3, dtype=np.int)  # fallback if nothing qualifies
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape

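A quick check of the prime decomposition helper:

    from yt.utilities.decompose import decompose_to_primes

    print(list(decompose_to_primes(12)))   # [2, 2, 3]
    print(list(decompose_to_primes(129)))  # [3, 43]
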

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -136,3 +136,15 @@
 class YTCoordinateNotImplemented(YTException):
     def __str__(self):
         return "This coordinate is not implemented for this geometry type."
+
+class YTUnitNotRecognized(YTException):
+    def __init__(self, unit):
+        self.unit = unit
+
+    def __str__(self):
+        return "This parameter file doesn't recognize %s" % self.unit
+
+class YTHubRegisterError(YTException):
+    def __str__(self):
+        return "You must create an API key before uploading.  See " + \
+               "https://data.yt-project.org/getting_started.html"


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, grid):
+        # flag_cells above calls method(grid), so pull pf off the grid
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
+        return (rho > self.over_density)

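A hedged sketch of how the metaclass registry above picks up new
criteria: defining a subclass with a _type_name is sufficient. The
class and field threshold below are hypothetical examples, not part of
the changeset:

    from yt.utilities.flagging_methods import \
        FlaggingMethod, flagging_method_registry

    class TemperatureThreshold(FlaggingMethod):
        _type_name = "temperature_threshold"
        def __init__(self, max_temperature):
            self.max_temperature = max_temperature
        def __call__(self, grid):
            # flag any zone hotter than the threshold for refinement
            return (grid["Temperature"] > self.max_temperature)

    # flagging_method_registry["temperature_threshold"] now maps to it.
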

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/grid_data_format/writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/writer.py
@@ -0,0 +1,171 @@
+"""
+Writing yt data to a GDF file.
+
+Authors: Casey W. Stark <caseywstark at gmail.com>
+Affiliation: UC Berkeley
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Casey W. Stark.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+import os
+
+import h5py
+import numpy as np
+
+from yt import __version__ as yt_version
+
+
+def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
+                 particle_type_name="dark_matter"):
+    """
+    Write a parameter file to the given path in the Grid Data Format.
+
+    Parameters
+    ----------
+    pf : StaticOutput object
+        The yt data to write out.
+    gdf_path : string
+        The path of the file to output.
+
+    """
+    # Make sure we have the absolute path to the file first
+    gdf_path = os.path.abspath(gdf_path)
+
+    # Stupid check -- is the file already there?
+    # @todo: make this a specific exception/error.
+    if os.path.exists(gdf_path):
+        raise IOError("A file already exists in the location: %s. Please provide a new one or remove that file." % gdf_path)
+
+    ###
+    # Create and open the file with h5py
+    ###
+    f = h5py.File(gdf_path, "w")
+
+    ###
+    # "gridded_data_format" group
+    ###
+    g = f.create_group("gridded_data_format")
+    g.attrs["data_software"] = "yt"
+    g.attrs["data_software_version"] = yt_version
+    if data_author is not None:
+        g.attrs["data_author"] = data_author
+    if data_comment is not None:
+        g.attrs["data_comment"] = data_comment
+
+    ###
+    # "simulation_parameters" group
+    ###
+    g = f.create_group("simulation_parameters")
+    g.attrs["refine_by"] = pf.refine_by
+    g.attrs["dimensionality"] = pf.dimensionality
+    g.attrs["domain_dimensions"] = pf.domain_dimensions
+    g.attrs["current_time"] = pf.current_time
+    g.attrs["domain_left_edge"] = pf.domain_left_edge
+    g.attrs["domain_right_edge"] = pf.domain_right_edge
+    g.attrs["unique_identifier"] = pf.unique_identifier
+    g.attrs["cosmological_simulation"] = pf.cosmological_simulation
+    # @todo: Where is this in the yt API?
+    g.attrs["num_ghost_zones"] = 0
+    # @todo: Where is this in the yt API?
+    g.attrs["field_ordering"] = 0
+    # @todo: not yet supported by yt.
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
+
+    if pf.cosmological_simulation:
+        g.attrs["current_redshift"] = pf.current_redshift
+        g.attrs["omega_matter"] = pf.omega_matter
+        g.attrs["omega_lambda"] = pf.omega_lambda
+        g.attrs["hubble_constant"] = pf.hubble_constant
+
+    ###
+    # "field_types" group
+    ###
+    g = f.create_group("field_types")
+
+    # Which field list should we iterate over?
+    for field_name in pf.h.field_list:
+        # create the subgroup with the field's name
+        sg = g.create_group(field_name)
+
+        # grab the display name and units from the field info container.
+        display_name = pf.field_info[field_name].display_name
+        units = pf.field_info[field_name].get_units()
+
+        # check that they actually contain something...
+        if display_name:
+            sg.attrs["field_name"] = display_name
+        else:
+            sg.attrs["field_name"] = field_name
+        if units:
+            sg.attrs["field_units"] = units
+        else:
+            sg.attrs["field_units"] = "None"
+        # @todo: the values must be in CGS already right?
+        sg.attrs["field_to_cgs"] = 1.0
+        # @todo: is this always true?
+        sg.attrs["staggering"] = 0
+
+    ###
+    # "particle_types" group
+    ###
+    g = f.create_group("particle_types")
+
+    # @todo: Particle type iterator
+    sg = g.create_group(particle_type_name)
+    sg["particle_type_name"] = particle_type_name
+
+    ###
+    # root datasets -- info about the grids
+    ###
+    f["grid_dimensions"] = pf.h.grid_dimensions
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
+    f["grid_level"] = pf.h.grid_levels
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
+    f["grid_particle_count"] = pf.h.grid_particle_count
+
+    ###
+    # "data" group -- where we should spend the most time
+    ###
+    g = f.create_group("data")
+
+    for grid in pf.h.grids:
+        # add group for this grid
+
+        grid_group = g.create_group("grid_%010i" % grid.id)
+        # add group for the particles on this grid
+        particles_group = grid_group.create_group("particles")
+        pt_group = particles_group.create_group(particle_type_name)
+
+        # add the field data to the grid group
+        for field_name in pf.h.field_list:
+            # Check if this is a real field or particle data.
+            field_obj = pf.field_info[field_name]
+
+            if field_obj.particle_type:  # particle data
+                pt_group[field_name] = grid.get_data(field_name)
+            else:  # a field
+                grid_group[field_name] = grid.get_data(field_name)
+
+    # don't forget to close the file.
+    f.close()


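A minimal usage sketch (hypothetical dataset path):

    from yt.mods import load
    from yt.utilities.grid_data_format.writer import write_to_gdf

    pf = load("DD0010/DD0010")
    write_to_gdf(pf, "DD0010.gdf", data_author="J. Doe",
                 data_comment="Written with write_to_gdf")
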


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):


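The grid_traversal hunk above fixes two star-rendering bugs: the inner star loop reused the outer counter `i`, and the kd-tree result set was never rewound, so every sample after the first saw zero stars. A minimal pure-Python sketch of the rewind pattern (the `ResultSet` class is a hypothetical stand-in for the C kdtree result handle, not part of yt):

class ResultSet(object):
    # Hypothetical stand-in for the C kdtree result set.
    def __init__(self, items):
        self.items = items
        self.pos = 0
    def rewind(self):              # analogue of kd_res_rewind
        self.pos = 0
    def next_item(self):           # analogue of kd_res_item + kd_res_next
        item = self.items[self.pos]
        self.pos += 1
        return item

ballq = ResultSet(["star0", "star1"])
for dti in range(3):               # the n_samples loop
    ballq.rewind()                 # without this, samples 1 and 2 see no stars
    for i in range(len(ballq.items)):
        star = ballq.next_item()   # accumulate this star's contribution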
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -30,6 +30,7 @@
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
 from .poster.encode import multipart_encode
@@ -93,6 +94,7 @@
     def upload(self):
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
+        if api_key == '': raise YTHubRegisterError
         metadata, (final_name, chunks) = self._generate_post()
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
@@ -216,3 +218,22 @@
         metadata = self._attrs
         chunks = []
         return (metadata, ("chunks", []))
+
+class MinimalNotebook(MinimalRepresentation):
+    type = "notebook"
+    _attr_list = ("title",)
+
+    def __init__(self, filename, title = None):
+        # First we read in the data
+        if not os.path.isfile(filename):
+            raise IOError(filename)
+        self.data = open(filename).read()
+        if title is None:
+            title = json.loads(self.data)['metadata']['name']
+        self.title = title
+        self.data = np.fromstring(self.data, dtype='c')
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = [ ("notebook", self.data) ]
+        return (metadata, ("chunks", chunks))


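MinimalNotebook above packages an IPython notebook for upload to the yt Hub, defaulting the title to the notebook's own metadata name. A usage sketch (the filename and title are hypothetical, and upload() requires the hub_api_key option to be set, per the check added to upload above):

from yt.utilities.minimal_representation import MinimalNotebook

mn = MinimalNotebook("my_analysis.ipynb", title="Halo analysis")
mn.upload()  # raises YTHubRegisterError if yt's hub_api_key is unset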
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -56,6 +56,7 @@
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
@@ -66,6 +67,8 @@
             t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
             east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None; otherwise rotations about a
+            # fixed axis will break.  The north_vector calculated here will
+            # still be included in self.unit_vectors.

             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
@@ -74,7 +77,6 @@
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))
         east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
         self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
@@ -82,7 +84,7 @@
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------


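The orientation.py change stores the user-supplied north_vector (possibly None) before _setup_normalized_vectors runs, rather than the derived one, so rotations about a fixed axis can tell the two cases apart. A standalone numpy sketch of the frame construction being patched (mirrors the code above; not a yt API):

import numpy as np

def unit_vectors(normal_vector, north_vector=None):
    # Sketch of _setup_normalized_vectors: build an orthonormal frame.
    normal_vector = normal_vector / np.sqrt(np.dot(normal_vector, normal_vector))
    if north_vector is None:
        # Pick the coordinate axis most orthogonal to the normal ...
        vecs = np.identity(3)
        t = np.cross(normal_vector, vecs).sum(axis=1)
        ax = t.argmax()
        east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
        # ... and derive north from it; the caller's north_vector stays None.
        north_vector = np.cross(normal_vector, east_vector).ravel()
    else:
        east_vector = np.cross(north_vector, normal_vector).ravel()
    north_vector = north_vector / np.sqrt(np.dot(north_vector, north_vector))
    east_vector = east_vector / np.sqrt(np.dot(east_vector, east_vector))
    return [east_vector, north_vector, normal_vector]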
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/parallel_tools/controller_system.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -0,0 +1,69 @@
+"""
+A queueing system based on MPI
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+    
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+from contextlib import contextmanager
+from .parallel_analysis_interface import ProcessorPool
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+class WorkSplitter(object):
+    __metaclass__ = ABCMeta
+    def __init__(self, controller, group1, group2):
+        self.group1 = group1
+        self.group2 = group2
+        self.controller = controller
+
+    @classmethod
+    def setup(cls, ng1, ng2):
+        pp, wg = ProcessorPool.from_sizes(
+            [(1, "controller"), (ng1, "group1"), (ng2, "group2")])
+        groupc = pp['controller']
+        group1 = pp['group1']
+        group2 = pp['group2']
+        obj = cls(groupc, group1, group2)
+        obj.run(wg.name)
+
+    def run(self, name):
+        if name == "controller":
+            self.run_controller()
+        elif name == "group1":
+            self.run_group1()
+        elif name == "group2":
+            self.run_group2()
+        else:
+            raise NotImplementedError
+
+    @abstractmethod
+    def run_controller(self):
+        pass
+
+    @abstractmethod
+    def run_group1(self):
+        pass
+
+    @abstractmethod
+    def run_group2(self):
+        pass


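WorkSplitter is an abstract skeleton: setup carves the MPI ranks into a controller plus two groups, and each rank then dispatches into one of the three run_* methods. A sketch of a concrete subclass (class name and method bodies are hypothetical placeholders; this must be launched under MPI, e.g. via mpirun):

class ExampleSplitter(WorkSplitter):
    def run_controller(self):
        # hand out work items and collect acknowledgements
        pass

    def run_group1(self):
        # e.g. stage data off disk and feed it to group2
        pass

    def run_group2(self):
        # e.g. receive staged data and run the analysis
        pass

# One controller rank plus 4 + 12 workers, 17 MPI ranks in total:
# ExampleSplitter.setup(ng1=4, ng2=12)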
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/parallel_tools/io_runner.py
--- /dev/null
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -0,0 +1,195 @@
+"""
+A simple IO staging mechanism
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import time
+import numpy as np
+from contextlib import contextmanager
+from .parallel_analysis_interface import ProcessorPool, parallel_objects
+from yt.utilities.io_handler import BaseIOHandler
+from yt.funcs import mylog
+from yt.convenience import load
+
+try:
+    from .parallel_analysis_interface import MPI
+except ImportError:
+    pass
+
+YT_TAG_MESSAGE = 317 # Cell 317 knows where to go
+
+class IOCommunicator(BaseIOHandler):
+    def __init__(self, pf, wg, pool):
+        mylog.info("Initializing IOCommunicator")
+        self.pf = pf
+        self.wg = wg # We don't need to use this!
+        self.pool = pool
+        self.comm = pool.comm
+        # We read our grids here
+        self.grids = []
+        storage = {}
+        grids = pf.h.grids.tolist()
+        grids.sort(key=lambda a:a.filename)
+        for sto, g in parallel_objects(grids, storage = storage):
+            sto.result = self.comm.rank
+            sto.result_id = g.id
+            self.grids.append(g)
+        self._id_offset = pf.h.grids[0]._id_offset
+        mylog.info("Reading from disk ...")
+        self.initialize_data()
+        mylog.info("Broadcasting ...")
+        self.comm.comm.bcast(storage, root = wg.ranks[0])
+        mylog.info("Done.")
+        self.hooks = []
+
+    def initialize_data(self):
+        pf = self.pf
+        fields = [f for f in pf.h.field_list
+                  if not pf.field_info[f].particle_type]
+        pfields = [f for f in pf.h.field_list
+                   if pf.field_info[f].particle_type]
+        # Preload is only defined for Enzo ...
+        if pf.h.io._data_style == "enzo_packed_3d":
+            self.queue = pf.h.io.queue
+            pf.h.io.preload(self.grids, fields)
+            for g in self.grids:
+                for f in fields:
+                    if f not in self.queue[g.id]:
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
+                        self.queue[g.id][f] = d
+                for f in pfields:
+                    self.queue[g.id][f] = self._read(g, f)
+        else:
+            self.queue = {}
+            for g in self.grids:
+                for f in fields + pfields:
+                    self.queue[g.id][f] = pf.h.io._read(g, f)
+
+    def _read(self, g, f):
+        fi = self.pf.field_info[f]
+        if fi.particle_type and g.NumberOfParticles == 0:
+            # because this gets upcast to float
+            return np.array([],dtype='float64')
+        try:
+            temp = self.pf.h.io._read_data_set(g, f)
+        except:# self.pf.hierarchy.io._read_exception as exc:
+            if fi.not_in_all:
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
+            else:
+                raise
+        return temp
+
+    def wait(self):
+        status = MPI.Status()
+        while 1:
+            if self.comm.comm.Iprobe(MPI.ANY_SOURCE,
+                                YT_TAG_MESSAGE,
+                                status = status):
+                msg = self.comm.comm.recv(
+                        source = status.source, tag = YT_TAG_MESSAGE)
+                if msg['op'] == "end":
+                    mylog.debug("Shutting down IO.")
+                    break
+                self._send_data(msg, status.source)
+                status = MPI.Status()
+            else:
+                time.sleep(1e-2)
+
+    def _send_data(self, msg, dest):
+        grid_id = msg['grid_id']
+        field = msg['field']
+        ts = self.queue[grid_id][field].astype("float64")
+        mylog.debug("Opening send to %s (%s)", dest, ts.shape)
+        self.hooks.append(self.comm.comm.Isend([ts, MPI.DOUBLE], dest = dest))
+
+class IOHandlerRemote(BaseIOHandler):
+    _data_style = "remote"
+
+    def __init__(self, pf, wg, pool):
+        self.pf = pf
+        self.wg = wg # probably won't need
+        self.pool = pool
+        self.comm = pool.comm
+        self.proc_map = self.comm.comm.bcast(None,
+                root = pool['io'].ranks[0])
+        super(IOHandlerRemote, self).__init__()
+
+    def _read_data_set(self, grid, field):
+        dest = self.proc_map[grid.id]
+        msg = dict(grid_id = grid.id, field = field, op="read")
+        mylog.debug("Requesting %s for %s from %s", field, grid, dest)
+        if self.pf.field_info[field].particle_type:
+            data = np.empty(grid.NumberOfParticles, 'float64')
+        else:
+            data = np.empty(grid.ActiveDimensions, 'float64')
+        hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
+        self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
+        mylog.debug("Waiting for data.")
+        MPI.Request.Wait(hook)
+        return data
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        #sl = tuple(reversed(sl))
+        return self._read_data_set(grid,field)[sl]
+
+    def terminate(self):
+        msg = dict(op='end')
+        if self.wg.comm.rank == 0:
+            for rank in self.pool['io'].ranks:
+                mylog.debug("Sending termination message to %s", rank)
+                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
+
+ at contextmanager
+def remote_io(pf, wg, pool):
+    original_io = pf.h.io
+    pf.h.io = IOHandlerRemote(pf, wg, pool)
+    yield
+    pf.h.io.terminate()
+    pf.h.io = original_io
+
+def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+    pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
+    rv = None
+    if wg.name == "work":
+        pf = load(fn)
+        with remote_io(pf, wg, pool):
+            rv = func(pf, *args, **kwargs)
+    elif wg.name == "io":
+        pf = load(fn)
+        io = IOCommunicator(pf, wg, pool)
+        io.wait()
+    # We should broadcast the result
+    rv = pool.comm.mpi_bcast(rv, root=pool['work'].ranks[0])
+    pool.free_all()
+    mylog.debug("Return value: %s", rv)
+    return rv
+
+# Here is an example of how to use this functionality.
+if __name__ == "__main__":
+    def gq(pf):
+        dd = pf.h.all_data()
+        return dd.quantities["TotalQuantity"]("CellMassMsun")
+    q = io_nodes("DD0087/DD0087", 8, 24, gq)
+    mylog.info(q)
+
+

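The io_nodes helper splits the ranks into a group that holds the dataset in memory and a group that runs the analysis, broadcasting the result to everyone at the end. A usage sketch along the lines of the __main__ block above (the dataset path and field are hypothetical; run under MPI with enough ranks for both groups):

from yt.convenience import load  # assumed import location for load
from yt.utilities.parallel_tools.io_runner import io_nodes

def density_extrema(pf):
    dd = pf.h.all_data()
    return dd.quantities["Extrema"]("Density")

# 8 ranks serve IO, 24 ranks do the work; needs 32 MPI ranks.
result = io_nodes("DD0040/DD0040", 8, 24, density_extrema)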

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -271,7 +271,7 @@
         self.size = size
         self.ranks = ranks
         self.comm = comm
-	self.name = name
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -294,11 +294,9 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-
-	# Default name to the workgroup number.
+        # Default name to the workgroup number.
         if name is None: 
-	    name = string(len(workgroups))
-	    
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -430,11 +428,13 @@
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
     # this will prevent intermediate objects from being created.
-    oiter = itertools.islice(enumerate(objects), my_new_id, None, njobs)
+    oiter = itertools.izip(obj_ids[my_new_id::njobs],
+                           objects[my_new_id::njobs])
     for result_id, obj in oiter:
         if storage is not None:
             rstore = ResultsStorage()


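The parallel_objects change replaces itertools.islice over enumerate(objects) with explicit strided indexing, so slice-aware containers (like time series) are sliced directly and each job still gets the global index as its result_id. A quick sketch of the assignment pattern (plain Python, no MPI):

import numpy as np

objects = ["a", "b", "c", "d", "e"]
njobs = 2
obj_ids = np.arange(len(objects))
for my_new_id in range(njobs):
    # each job strides through the list starting at its own id
    pairs = zip(obj_ids[my_new_id::njobs], objects[my_new_id::njobs])
    print my_new_id, pairs
# prints:
# 0 [(0, 'a'), (2, 'c'), (4, 'e')]
# 1 [(1, 'b'), (3, 'd')]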
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,13 +1,27 @@
-import numpy as np
-from numpy.testing import assert_array_equal
+from yt.testing import *
 import yt.utilities.linear_interpolators as lin
 
 def setup():
     pass
 
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
 
-def test_linear_interpolator():
-    random_data = np.random.random(128)
-    x = {"Random":np.mgrid[0.0:1.0:128j]}
-    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "Random", True)
-    assert_array_equal(ufi(x), random_data)
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -66,6 +66,7 @@
 from plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
-    ProjectionPlot
+    ProjectionPlot, \
+    OffAxisProjectionPlot
     
 


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 import _MPL
@@ -411,3 +412,28 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        buff = buff.swapaxes(0,1)
+        self[item] = buff
+        return buff
+
+




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -152,8 +152,7 @@
         alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
         bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:

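The image_writer change replaces a per-channel in-place transpose, which only works for square images, with a single swapaxes call that handles any shape. A tiny numpy check of the equivalence:

import numpy as np

bitmap = np.arange(2 * 3 * 4, dtype='uint8').reshape((2, 3, 4))
flipped = bitmap.swapaxes(0, 1)      # shape (3, 2, 4): x and y exchanged
assert flipped.shape == (3, 2, 4)
assert flipped[1, 0, 2] == bitmap[0, 1, 2]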

diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1504,7 +1504,7 @@
     @rootonly
     def save_book(self, filename, author = None, title = None, keywords = None,
                   subject = None, creator = None, producer = None,
-                  creation_data = None):
+                  creation_date = None):
         r"""Save a multipage PDF of all the current plots, rather than
         individual image files.
 
@@ -1551,15 +1551,21 @@
         >>> dd = pf.h.all_data()
         >>> pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
         ...                     weight = None)
-        >>> pc.save_book("my_plots.pdf", author="Matthew Turk", 
+        >>> pc.save_book("my_plots.pdf", author="Yours Truly",
         ...              title="Fun plots")
         """
         from matplotlib.backends.backend_pdf import PdfPages
         outfile = PdfPages(filename)
         for plot in self.plots:
             plot.save_to_pdf(outfile)
-        if info is not None:
-            outfile._file.writeObject(outfile._file.infoObject, info)
+        pdf_keys = ['Title', 'Author', 'Subject', 'Keywords', 'Creator',
+            'Producer', 'CreationDate']
+        pdf_values = [title, author, subject, keywords, creator, producer,
+            creation_date]
+        metadata = outfile.infodict()
+        for key, val in zip(pdf_keys, pdf_values):
+            if isinstance(val, str):
+                metadata[key] = val
         outfile.close()
 
 def wrap_pylab_newplot(func):


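The rewritten save_book writes PDF metadata through matplotlib's public PdfPages.infodict() rather than poking at the private _file object, skipping any value that is not a string. A standalone sketch of the same pattern (pure matplotlib; the file name is hypothetical):

import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

outfile = PdfPages("book.pdf")
fig = plt.figure()
fig.gca().plot([1, 2, 3])
outfile.savefig(fig)
metadata = outfile.infodict()       # the PDF info dictionary
metadata['Author'] = "Yours Truly"
metadata['Title'] = "Fun plots"
outfile.close()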
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -83,11 +83,11 @@
     def pixel_scale(self,plot):
         x0, x1 = plot.xlim
         xx0, xx1 = plot._axes.get_xlim()
-        dx = (xx0 - xx1)/(x1 - x0)
+        dx = (xx1 - xx0)/(x1 - x0)
         
         y0, y1 = plot.ylim
         yy0, yy1 = plot._axes.get_ylim()
-        dy = (yy0 - yy1)/(y1 - y0)
+        dy = (yy1 - yy0)/(y1 - y0)
 
         return (dx,dy)
 
@@ -149,7 +149,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -211,8 +213,9 @@
 
 class ContourCallback(PlotCallback):
     _type_name = "contour"
-    def __init__(self, field, ncont=5, factor=4, clim=None, plot_args=None):
-        """ 
+    def __init__(self, field, ncont=5, factor=4, clim=None,
+                 plot_args = None):
+        """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
 
@@ -288,7 +291,7 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -297,30 +300,31 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
         """
-        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+        annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
-        *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
+        IDs are only drawn for grids wider than *min_pix_ids* pixels.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
-        self.annotate = annotate # put grid numbers in the corner.
+        self.min_pix_ids = min_pix_ids
+        self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
-        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         xi = x_dict[plot.data.axis]
         yi = y_dict[plot.data.axis]
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
+        (dx, dy) = self.pixel_scale(plot)
+        (xpix, ypix) = plot.image._A.shape
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
@@ -333,29 +337,32 @@
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
+            left_edge_x = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_y = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+            if visible.nonzero()[0].size == 0: continue
             verts = np.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-            visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
-                       ( right_edge_px - left_edge_px > self.min_pix )
+                [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
+                 (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
-            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
-            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
-            if self.annotate:
-                ids = [g.id for g in plot.data._grids]
-                for n in range(len(left_edge_px)):
-                    plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
+            if self.draw_ids:
+                visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
+                               ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
+                active_ids = np.unique(plot.data['GridIndices'])
+                for i in np.where(visible_ids)[0]:
+                    plot._axes.text(
+                        left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
+                        left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
+                        "%d" % active_ids[i], clip_on=True)
             plot._axes.hold(False)
 
 class StreamlineCallback(PlotCallback):
@@ -430,6 +437,9 @@
             iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
@@ -454,6 +464,30 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
+class TimeCallback(PlotCallback):
+    _type_name = "time"
+    def __init__(self, format_code='10.7e'):
+        """
+        This annotates the plot with the current simulation time.
+        For now, the time is displayed in seconds.
+        *format_code* may optionally be set to a custom
+        C-style format code for the time display.
+        """
+        self.format_code = format_code
+        PlotCallback.__init__(self)
+    
+    def __call__(self, plot):
+        current_time = plot.pf.current_time/plot.pf['Time']
+        timestring = format(current_time,self.format_code)
+        base = timestring[:timestring.find('e')]
+        exponent = timestring[timestring.find('e')+1:]
+        if exponent[0] == '+':
+            exponent = exponent[1:]
+        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
+        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
+        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
+        plot._axes.add_artist(at)
+
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -624,8 +658,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)


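TimeCallback builds its label by splitting a C-style exponential format into mantissa and exponent for mathtext. A sketch of just that string manipulation (the time value is hypothetical):

current_time = 4.0485e14  # seconds, hypothetical
timestring = format(current_time, '10.7e')
base = timestring[:timestring.find('e')]
exponent = timestring[timestring.find('e') + 1:]
if exponent[0] == '+':
    exponent = exponent[1:]
label = r'$t\/=\/' + base + r'\times\,10^{' + exponent + r'}\, \rm{s}$'
print label  # $t\/=\/4.0485000\times\,10^{14}\, \rm{s}$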
diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,6 +26,11 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+try:
+    from matplotlib.pyparsing import ParseFatalException
+except ImportError:
+    from pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
@@ -38,7 +43,8 @@
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
@@ -101,7 +107,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -152,7 +161,7 @@
               center[y_dict[axis]]+width[1]/2]
     return (bounds,center)
 
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1'):
+def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
     if width == None:
         width = (pf.domain_width.min(),
                  pf.domain_width.min())
@@ -163,6 +172,13 @@
         width = (width, width)
     Wx, Wy = width
     width = np.array((Wx/pf[unit], Wy/pf[unit]))
+    if depth != None:
+        if iterable(depth) and isinstance(depth[1],str):
+            d,unit = depth
+            depth = d/pf[unit]
+        elif iterable(depth):
+            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+        width = np.append(width,depth)
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -171,16 +187,19 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = np.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
-    center = np.dot(mat,center)
-    width = width/pf.domain_width.min()
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
+        width = width
+    
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+    else:
+        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
 
-    bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
-    
     return (bounds,center)
 
 class PlotWindow(object):
@@ -189,8 +208,8 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, 
-                 periodic = True, origin='center-window', oblique=False):
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+                 periodic=True, origin='center-window', oblique=False):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -242,20 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        try:
+        if hasattr(self,'zlim'):
+            bounds = self.xlim+self.ylim+self.zlim
+        else:
             bounds = self.xlim+self.ylim
-            if self.oblique == False:
-                self._frb = FixedResolutionBuffer(self.data_source, 
-                                                  bounds, self.buff_size, 
-                                                  self.antialias, 
-                                                  periodic=self._periodic)
-            else:
-                self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                         bounds, self.buff_size, 
-                                                         self.antialias, 
-                                                         periodic=self._periodic)
-        except:
-            raise RuntimeError("Failed to repixelize.")
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -342,42 +355,75 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if hasattr(self,'zlim'):
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
 
         parameters
         ----------
-        width : float, array of floats, or (float, unit) tuple.
-            the width of the image.
+        width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
+             Width can have four different formats to support windows with variable 
+             x and y widths.  They are:
+             
+             ==================================     =======================
+             format                                 example                
+             ==================================     =======================
+             (float, string)                        (10,'kpc')
+             ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+             float                                  0.2
+             (float, float)                         (0.2, 0.3)
+             ==================================     =======================
+             
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+             the y axis.  In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.
         unit : str
             the unit the width has been specified in.
             defaults to code units.  If width is a tuple this 
             argument is ignored
 
         """
-        if iterable(width) and isinstance(width[1],str):
-            unit = width[1]
-            width = width[0]
-        elif not iterable(width):
-            width = (width,width)
+        if iterable(width): 
+            if isinstance(width[1],str):
+                w, unit = width
+                width = (w, w)
+            elif isinstance(width[1], tuple):
+                wx,unitx = width[0]
+                wy,unity = width[1]
+                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        else:
+            width = (width, width)
         Wx, Wy = width
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
+        
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -434,10 +480,11 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._unit = None
+        self._callbacks = []
+        self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
         self.setup_callbacks()
-        self._callbacks = []
-        self._field_transform = {}
         for field in self._frb.data.keys():
             finfo = self.data_source._get_field_info(*field)
             if finfo.take_log:
@@ -555,13 +602,52 @@
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
+    @invalidate_plot
+    def set_axes_unit(self, unit_name):
+        r"""Set the unit for display on the x and y axes of the image.
+
+        Parameters
+        ----------
+        unit_name : string
+            A unit, available for conversion in the parameter file, that the
+            image extents will be displayed in.  If set to None, any previous
+            units will be reset.  If the unit is None, the default is chosen.
+            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+
+        Raises
+        ------
+        YTUnitNotRecognized
+            If the unit is not known, this will be raised.
+
+        Examples
+        --------
+
+        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p.show()
+        >>> p.set_axes_unit("kpc")
+        >>> p.show()
+        >>> p.set_axes_unit(None)
+        >>> p.show()
+        """
+        # the unit may live in conversion_factors or units, so just probe it
+        try:
+            self.pf[unit_name]
+        except KeyError: 
+            if unit_name is not None:
+                raise YTUnitNotRecognized(unit_name)
+        self._unit = unit_name
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        unit = get_smallest_appropriate_unit(x_width, self.pf)
+        if self._unit is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+        else:
+            unit = self._unit
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -650,33 +736,52 @@
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
+            norm_size = 10.0
+            cbar_frac = 0.0
             if aspect > 1.0:
-                size = (10.0, 10.0/aspect)
+                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
             else:
-                size = (10.0*aspect, 10.0)
+                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
+            if not md['unit'] in ['1', 'u', 'unitary']:
+                axes_unit_label = '\/\/('+md['unit']+')'
+            else:
+                axes_unit_label = ''
+
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
-                          r'\/\/('+md['unit'].encode('string-escape')+r')}$' for i in (0,1)]
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
+                        axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$',
-                          r'$\rm{Image\/y}\/\/\rm{('+md['unit'].encode('string-escape')+r')}$']
-                
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
 
             ftype, fname = f
             field_name = self.data_source._get_field_info(ftype, fname).display_name
-            if field_name is None: field_name = fname
-            if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
-            else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+
+            if field_name is None:
+                field_name = r'$\rm{'+fname+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(fname,field_name,str(err))
+
+            if md['units'] is None or md['units'] == '':
+                label = field_name
+            else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+                label = field_name+r'$\/\/('+md['units']+r')$'
+
 
             self.plots[f].cb.set_label(label)
 
@@ -725,7 +830,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self, name=None, mpl_kwargs=None):
         """saves the plot to disk.
 
         Parameters
@@ -733,24 +838,24 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
         if name == None:
             name = str(self.pf)
         elif name.endswith('.png'):
             return v.save(name)
+        if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            type = 'Projection'
+        type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        if 'Cutting' in self.data_source.__class__.__name__:
-            type = 'OffAxisSlice'
         names = []
         for k, v in self.plots.iteritems():
-            if isinstance(k, types.TupleType): k = k[1]
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
             else:
@@ -796,7 +901,11 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None, origin='center-window'):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
+                 origin='center-window'):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -816,11 +925,12 @@
              or the axis name itself
         fields : string
              The name of the field(s) to be plotted.
-        center : two or three-element vector of sequence floats, 'c', or 'center'
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
              The coordinate of the center of the image.  If left blank,
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
-             the middle of the domain.
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -840,6 +950,11 @@
+             the y axis.  In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -862,9 +977,13 @@
         slc = pf.h.slice(axis, center[axis])
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
-    def __init__(self, pf, axis, fields, center='c', width=None,
+    _plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
+    def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
         
@@ -885,11 +1004,12 @@
              or the axis name itself
         fields : string
             The name of the field(s) to be plotted.
-        center : A two or three-element vector of sequence floats, 'c', or 'center'
-            The coordinate of the center of the image.  If left blanck,
-            the image centers on the location of the maximum density
-            cell.  If set to 'c' or 'center', the plot is centered on
-            the middle of the domain.
+        center : two or three-element sequence of floats, 'c', 'center', or 'max'
+             The coordinate of the center of the image.  If left blank,
+             the image centers on the location of the maximum density
+             cell.  If set to 'c' or 'center', the plot is centered on
+             the middle of the domain.  If set to 'max', the plot is centered
+             on the point of highest density.
         width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
@@ -909,6 +1029,11 @@
+             the y axis.  In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         origin : A string
             The location of the origin of the plot coordinate system.
             Currently, can be set to three options: 'left-domain', corresponding
@@ -934,9 +1059,14 @@
         (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
         proj = pf.h.proj(fields, axis, weight_field=weight_field, center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
+        self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), north_vector=None):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -964,6 +1094,11 @@
             A tuple containing the width of image and the string key of
             the unit: (width, 'unit').  If set to a float, code units
             are assumed
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -975,6 +1110,96 @@
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
+
+class OffAxisProjectionDummyDataSource(object):
+    _type_name = 'proj'
+    proj_style = 'integrate'
+    _key_fields = []
+    def __init__(self, center, pf, normal_vector, width, fields, 
+                 interpolated, resolution = (800,800), weight=None,  
+                 volume=None, no_ghost=False, le=None, re=None, 
+                 north_vector=None):
+        self.center = center
+        self.pf = pf
+        self.axis = 4 # always true for oblique data objects
+        self.normal_vector = normal_vector
+        self.width = width
+        self.fields = fields
+        self.interpolated = interpolated
+        self.resolution = resolution
+        self.weight_field = weight
+        self.volume = volume
+        self.no_ghost = no_ghost
+        self.le = le
+        self.re = re
+        self.north_vector = north_vector
+
+class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
+    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+                 le=None, re=None, interpolated=False):
+        r"""Creates an off axis projection plot from a parameter file
+
+        Given a pf object, a normal vector to project along, and
+        a field name string, this will return a PWViewerMPL object
+        containing the plot.
+        
+        The plot can be updated using one of the many helper functions
+        defined in PlotWindow.
+
+        Parameters
+        ----------
+        pf : :class:`yt.data_objects.api.StaticOutput`
+            This is the parameter file object corresponding to the
+            simulation output to be plotted.
+        normal : a sequence of floats
+            The vector normal to the slicing plane.
+        fields : string
+            The name of the field(s) to be plotted.
+        center : A two- or three-element sequence of floats, 'c', or 'center'
+            The coordinate of the center of the image.  If left blank,
+            the image centers on the location of the maximum density
+            cell.  If set to 'c' or 'center', the plot is centered on
+            the middle of the domain.
+        width : A tuple or a float
+            A tuple containing the width of image and the string key of
+            the unit: (width, 'unit').  If set to a float, code units
+            are assumed
+        depth : A tuple or a float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed
+        weight_field : string
+            The name of the weighting field.  Set to None for no weight.
+        max_level: int
+            The maximum level to project to.
+        axes_unit : A string
+            The name of the unit for the tick labels on the x and y axes.  
+            Defaults to None, which automatically picks an appropriate unit.
+            If axes_unit is '1', 'u', or 'unitary', it will not display the 
+            units, and only show the axes name.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+
+        """
+        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
+        # Hard-coding the resolution for now
+        fields = ensure_list(fields)[:]
+        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
+                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
+                                                       le=le, re=re, north_vector=north_vector)
+        # Hard-coding the origin keyword since the other two options
+        # aren't well-defined for off-axis data objects
+        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,oblique=True)
+        self.set_axes_unit(axes_unit)
 
 _metadata_template = """
 %(pf)s<br>
@@ -1156,12 +1381,24 @@
     figure = None
     def __init__(self, field, size):
         self._plot_valid = True
-        self.figure = matplotlib.figure.Figure(figsize = size, frameon = True)
-        # Hardcoding the axis dimensions for now
-        self.axes = self.figure.add_axes((.07,.10,.8,.8))
-        self.cax = self.figure.add_axes((.87,.10,.04,.8))
-
-    def save(self, name, canvas = None):
+        fsize, axrect, caxrect = self._get_best_layout(size)
+        
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good; '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs=None, canvas = None):
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
         if name[-4:] == '.png':
             suffix = ''
         else:
@@ -1178,9 +1415,47 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn, bbox_inches='tight')
+        canvas.print_figure(fn, **mpl_kwargs)
         return fn
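
With this change, arbitrary matplotlib keywords can be forwarded to
print_figure; note that bbox_inches='tight' is no longer hard-coded, so
it must be passed explicitly if wanted.  A short sketch, assuming an
instance of this plot class named plot (the dpi value is illustrative):

    plot.save("projection", mpl_kwargs={'dpi': 300, 'bbox_inches': 'tight'})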
 
+    def _get_best_layout(self, size):
+        aspect = 1.0*size[0]/size[1]
+
+        # add room for a colorbar
+        cbar_inches = 0.7
+        newsize = [size[0] + cbar_inches, size[1]]
+        
+        # add buffers for text, and a bit of whitespace on top
+        text_buffx = 1.0/(newsize[0])
+        text_bottomy = 0.7/size[1]
+        text_topy = 0.3/size[1]
+
+        # calculate how much room the colorbar takes
+        cbar_frac = cbar_inches/newsize[0] 
+        
+        # Calculate y fraction, then use to make x fraction.
+        yfrac = 1.0-text_bottomy-text_topy
+        ysize = yfrac*size[1]
+        xsize = aspect*ysize
+        xfrac = xsize/newsize[0]
+
+        # Now make sure it all fits!
+        xbig = xfrac + text_buffx + 2.0*cbar_frac
+        ybig = yfrac + text_bottomy + text_topy
+
+        if xbig > 1:
+            xsize /= xbig
+            ysize /= xbig
+        if ybig > 1:
+            xsize /= ybig
+            ysize /= ybig
+        xfrac = xsize/newsize[0]
+        yfrac = ysize/newsize[1]
+
+        axrect  = (text_buffx, text_bottomy, xfrac, yfrac)
+        caxrect = (text_buffx + xfrac, text_bottomy, cbar_frac/4., yfrac)
+        return newsize, axrect, caxrect
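
A quick trace of the arithmetic above for a 10x8 inch request (values
rounded; this only re-derives what the method computes):

    size = (10.0, 8.0)
    aspect = size[0] / size[1]                # 1.25
    newsize = [size[0] + 0.7, size[1]]        # [10.7, 8.0] after colorbar room
    yfrac = 1.0 - 0.7/size[1] - 0.3/size[1]   # 0.875 after the text buffers
    ysize = yfrac * size[1]                   # 7.0 inches of axes height
    xsize = aspect * ysize                    # 8.75 inches, preserving aspect
    print(xsize / newsize[0])                 # ~0.818 before the overflow check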
+
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
         f = cStringIO.StringIO()
@@ -1202,3 +1477,5 @@
         self.image = self.axes.imshow(data, origin='lower', extent = extent,
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
+        self.image.axes.ticklabel_format(scilimits=(-4,3))
+


diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -298,7 +298,7 @@
             nz = (self.profile[self._current_field] > 0)
             mi = self.profile[self._current_field][nz].min()
         else:
-            mi = self.profile[self._current_field][nz].min()
+            mi = self.profile[self._current_field].min()
         ma = self.profile[self._current_field].max()
         cbar.bounds = (mi, ma)
         cbar.cmap = 'algae'
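
The corrected branch matters because nz (the mask of positive values) is
only defined on the log-scale path; a small sketch of the intended
bounds logic:

    import numpy as np

    data = np.array([-1.0, 0.0, 2.0, 5.0])
    take_log = False                # linear scale in this example
    if take_log:
        mi = data[data > 0].min()   # smallest positive value for a log scale
    else:
        mi = data.min()             # plain minimum for a linear scale
    ma = data.max()                 # colorbar bounds become (mi, ma)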




diff -r 24de0bfbca7012c7ce3a9b23a50abefa76c453ac -r 24087b9826d1e115436cedb19f6544859e957c07 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -195,7 +195,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -282,7 +282,7 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
@@ -301,7 +301,11 @@
                 np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
@@ -312,9 +316,9 @@
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
         return sampler
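
A sketch of the new hook (the dataset, transfer function bounds, and the
star-tree object are all placeholders; the diff only establishes that
star_trees, when set, is forwarded to the sampler as star_list):

    import numpy as np
    from yt.mods import load, ColorTransferFunction

    pf = load("galaxy0030")                  # illustrative dataset
    tf = ColorTransferFunction((-30, -22))   # illustrative bounds
    c = (pf.domain_left_edge + pf.domain_right_edge) / 2.0
    cam = pf.h.camera(c, np.array([0., 0., 1.]), 1.0, 512, tf)
    cam.star_trees = my_star_trees           # placeholder: whatever object
                                             # the sampler expects as star_list
    image = cam.snapshot("stars.png")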
 
     def finalize_image(self, image):
@@ -587,7 +591,7 @@
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
+        north_vector = self.orienter.unit_vectors[1]
         self.switch_view(north_vector=np.dot(R, north_vector))
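
With the fix, the rotation is taken about the orienter's current up
vector rather than the possibly-unset north_vector attribute.
Continuing the camera sketch above (assuming this method is the
camera's roll):

    cam.roll(np.pi / 4.0)   # rotate the up direction 45 degrees about the view normal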
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
@@ -720,6 +724,9 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +740,12 @@
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples
@@ -1667,7 +1680,8 @@
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1726,8 +1740,9 @@
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated, 
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
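
A minimal sketch of the new north_vector pass-through (dataset and field
names are illustrative):

    import numpy as np
    from yt.mods import load

    pf = load("galaxy0030")                  # illustrative dataset
    L = np.array([0.3, 0.7, 0.2])            # projection normal
    up = np.array([0.0, 0.0, 1.0])           # fixes the image orientation
    c = (pf.domain_left_edge + pf.domain_right_edge) / 2.0
    image = off_axis_projection(pf, c, L, 1.0, 512,
                                "Density", north_vector=up)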

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the commit notification service enabled for this
repository.


