[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Sep 20 11:15:53 PDT 2012


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/5087f6769726/
changeset:   5087f6769726
branch:      yt
user:        ngoldbaum
date:        2012-09-08 01:49:29
summary:     Updating the plot window to accept field display names that are
already properly formatted mathtext.  Right now this is detected by
checking whether the first character of the display name is '$' (i.e.,
the string begins with a LaTeX math delimiter).  If not, the display
name is assumed to be unformatted LaTeX code and is prepended with
\rm{ and appended with }.  This means that in the future all display
names that include mathematical expressions must be properly formatted
mathtext.
affected #:  3 files
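
A minimal sketch of the detection rule described above (illustrative
only; the actual change to plot_window.py is in the diff below):

    def format_display_name(display_name):
        # Names that already begin with '$' are treated as fully
        # formatted mathtext and passed through untouched.
        if display_name.startswith('$'):
            return display_name
        # Anything else is wrapped so matplotlib renders it upright.
        return r'$\rm{' + display_name + r'}$'

So format_display_name(r'$|B|$') comes back unchanged, while
format_display_name('Density') becomes r'$\rm{Density}$'.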

diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -968,7 +968,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "\rm{Total}\/\rm{Energy}",
+          display_name = "$\rm{Total}\/\rm{Energy}$",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -487,7 +487,7 @@
     """
     return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
 # Particle functions
 

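A hypothetical field registration following the new convention (the
VelocityMagnitude field and its definition are made up for
illustration; numpy is imported as na in this file):

    def _VelocityMagnitude(field, data):
        # Illustrative derived field; not part of this commit.
        return na.sqrt(data['x-velocity']**2 + data['y-velocity']**2 +
                       data['z-velocity']**2)
    add_field("VelocityMagnitude", function=_VelocityMagnitude,
              display_name=r"$|\vec{v}|$", units=r"\rm{cm}/\rm{s}")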

diff -r 82199064771081fe61b58de835e150916e692860 -r 5087f6769726a9527b508470eb1b904bfe6beacd yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -723,12 +723,12 @@
                 self.plots[f].image, cax = self.plots[f].cax)
 
             if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit'].encode('string-escape')+')'
+                axes_unit_label = '\/\/('+md['unit']+')'
             else:
                 axes_unit_label = ''
 
             if self.oblique == False:
-                labels = [r'$\rm{'+axis_labels[axis_index][i].encode('string-escape')+
+                labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
@@ -738,11 +738,16 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
+            # If the field author has passed us formatted mathtext, leave it alone
+            if field_name[0] == '$':
+                label = field_name[:-1]
+            else:
+                label = r'$\rm{'+field_name+r'}'
             if field_name is None: field_name = f
             if md['units'] == None or md['units'] == '':
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}$'
+                label += r'$'
             else:
-                label = r'$\rm{'+field_name.encode('string-escape')+r'}\/\/('+md['units']+r')$'
+                label += r'\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 



https://bitbucket.org/yt_analysis/yt/changeset/cc17d1b6a2f9/
changeset:   cc17d1b6a2f9
branch:      yt
user:        ngoldbaum
date:        2012-09-09 03:58:00
summary:     Improving the way unit names and display names of derived fields are handled.  Now when PlotWindow encounters a field name or a display name it cannot parse, an exception is thrown and a useful error message is printed.  Complicated field names involving arbitrary LaTeX and text are now possible.
affected #:  2 files
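
A minimal sketch of the validation step this commit adds (assumes
matplotlib, which at the time bundled pyparsing as matplotlib.pyparsing,
as the diff below imports it):

    from matplotlib.mathtext import MathTextParser
    from matplotlib.pyparsing import ParseFatalException

    parser = MathTextParser('Agg')
    try:
        parser.parse(r'$\rm{Density}\/(\rm{g}/\rm{cm}^{3})$')
    except ParseFatalException, err:
        # In yt this raises YTCannotParseFieldDisplayName with the
        # parser's message attached (see the exceptions diff below).
        print "LaTeX parser error: %s" % err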

diff -r 5087f6769726a9527b508470eb1b904bfe6beacd -r cc17d1b6a2f976688ba07ab333e013becb85f097 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -76,6 +76,30 @@
     def __str__(self):
         return "Simulation time-series type %s not defined." % self.sim_type
 
+class YTCannotParseFieldDisplayName(YTException):
+    def __init__(self, field_name, display_name, mathtext_error):
+        self.field_name = field_name
+        self.display_name = display_name
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+                % (self.display_name, self.field_name) + self.mathtext_error
+
+class YTCannotParseUnitDisplayName(YTException):
+    def __init__(self, field_name, display_unit, mathtext_error):
+        self.field_name = field_name
+        self.unit_name = display_unit
+        self.mathtext_error = mathtext_error
+
+    def __str__(self):
+        return ("The unit display name \"%s\" "
+                "of the derived field %s " 
+                "contains the following LaTeX parser errors:\n" ) \
+            % (self.unit_name, self.field_name) + self.mathtext_error
+
 class AmbiguousOutputs(YTException):
     def __init__(self, pf):
         YTException.__init__(self, pf)


diff -r 5087f6769726a9527b508470eb1b904bfe6beacd -r cc17d1b6a2f976688ba07ab333e013becb85f097 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,6 +26,8 @@
 """
 import base64
 import matplotlib.figure
+from matplotlib.mathtext import MathTextParser
+from matplotlib.pyparsing import ParseFatalException
 import cStringIO
 import types
 import __builtin__
@@ -738,16 +740,27 @@
             self.plots[f].axes.set_ylabel(labels[1])
 
             field_name = self.data_source.pf.field_info[f].display_name
-            # If the field author has passed us formatted mathtext, leave it alone
-            if field_name[0] == '$':
-                label = field_name[:-1]
+
+            if field_name is None:
+                field_name = r'$\rm{'+f+r'}$'
+            elif field_name.find('$') == -1:
+                field_name = r'$\rm{'+field_name+r'}$'
+            
+            parser = MathTextParser('Agg')
+            try:
+                parser.parse(field_name)
+            except ParseFatalException, err:
+                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
+
+            try:
+                parser.parse(r'$'+md['units']+r'$')
+            except ParseFatalException, err:
+                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
+
+            if md['units'] == None or md['units'] == '':
+                label = field_name
             else:
-                label = r'$\rm{'+field_name+r'}'
-            if field_name is None: field_name = f
-            if md['units'] == None or md['units'] == '':
-                label += r'$'
-            else:
-                label += r'\/\/('+md['units']+r')$'
+                label = field_name+r'$\/\/('+md['units']+r')$'
 
             self.plots[f].cb.set_label(label)
 



https://bitbucket.org/yt_analysis/yt/changeset/cc78ebee2f1f/
changeset:   cc78ebee2f1f
branch:      yt
user:        ngoldbaum
date:        2012-09-19 01:25:39
summary:     Fixing the way 2D data is loaded.  Need to be a bit more careful for 2D AMR simulations.
affected #:  2 files
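
A rough standalone sketch of the cell-width logic (names mirror the
grid_patch.py diff below; this is not the exact yt code):

    import numpy as np

    def grid_cell_widths(LE, RE, active_dims, domain_LE, domain_RE,
                         dimensionality):
        # Nominal cell widths from the grid extent along each axis.
        dds = np.array((np.asarray(RE) - np.asarray(LE)) /
                       np.asarray(active_dims))
        # For 1D/2D data the widths along the unused axes are not
        # meaningful, so fall back to the full domain width there.
        for ax in range(dimensionality, 3):
            dds[ax] = domain_RE[ax] - domain_LE[ax]
        return dds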

diff -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 -r cc78ebee2f1f9ab46f73fa00ca7698fac397a4f0 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,6 +210,8 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property


diff -r 2a6872ca2489c0e8cd38fd5fecc9738b03fefc52 -r cc78ebee2f1f9ab46f73fa00ca7698fac397a4f0 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -148,8 +148,8 @@
 
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
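
A rough illustration of the edge snapping the FLASH frontend now
restricts to the first ND axes, where ND is the dimensionality (the
values here are made up; shapes follow the diff above):

    import numpy as np
    ND = 2                                  # e.g. a 2D FLASH dataset
    dx = np.array([[0.25, 0.25, 1.0]])      # cell widths on this level
    left_edge = np.array([0.249, 0.502, 0.0])
    left_edge[:ND] = np.rint(left_edge[:ND] / dx[0][:ND]) * dx[0][:ND]
    # left_edge is now [0.25, 0.5, 0.0]; the z component is untouched.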



https://bitbucket.org/yt_analysis/yt/changeset/28ca834d247d/
changeset:   28ca834d247d
branch:      yt
user:        ngoldbaum
date:        2012-09-20 19:58:50
summary:     Merging
affected #:  116 files

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
     setenv YT_DEST
 endif
 set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
 
 if ($?PYTHONPATH == 0) then
     setenv PYTHONPATH


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -18,7 +18,7 @@
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.
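
A quick illustration of the in-place idioms the styleguide recommends
(a is a throwaway example array):

    import numpy as np
    a = np.arange(12, dtype='float64')
    a.shape = (3, 4)        # reshape in place, no copy
    np.multiply(a, 3, a)    # scale in place instead of a = a * 3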


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -132,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                    # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -146,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -164,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
            # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -246,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -289,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -299,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -329,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -339,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -364,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -374,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos of width box fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -198,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -250,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -342,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -428,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -461,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -491,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -561,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -578,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -600,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -630,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -691,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -727,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -754,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
 


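In unique_solution.py, each light cone solution is reduced to a set of
axis-aligned cubes and compared through common_volume. A simplified sketch of
that comparison for a single pair of boxes, ignoring the periodic images that
the real common_volume folds in (box_overlap and the sample cubes are
illustrative, not part of the yt API):

    import numpy as np

    def box_overlap(cube1, cube2):
        # Each cube is an (ndim, 2) array of [left, right] edges.
        lo = np.maximum(cube1[:, 0], cube2[:, 0])
        hi = np.minimum(cube1[:, 1], cube2[:, 1])
        # Clip negative extents to zero so disjoint boxes give zero volume.
        return np.prod(np.clip(hi - lo, 0.0, None))

    cube1 = np.array([[0.00, 0.50], [0.0, 0.5], [0.0, 1.0]])
    cube2 = np.array([[0.25, 0.75], [0.0, 0.5], [0.0, 1.0]])
    print(box_overlap(cube1, cube2))  # 0.125

The loop above accumulates min(volume1, volume2) and the overlap for every
segment, so the ratio of the two sums then measures how much volume the two
solutions share.
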
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -124,7 +124,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        na.random.seed(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -162,9 +162,9 @@
                     (box_fraction_used +
                      self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                 # Random start point
-                self.light_ray_solution[q]['start'] = na.random.random(3)
-                theta = na.pi * na.random.random()
-                phi = 2 * na.pi * na.random.random()
+                self.light_ray_solution[q]['start'] = np.random.random(3)
+                theta = np.pi * np.random.random()
+                phi = 2 * np.pi * np.random.random()
                 box_fraction_used = 0.0
             else:
                 # Use end point of previous segment and same theta and phi.
@@ -174,9 +174,9 @@
             self.light_ray_solution[q]['end'] = \
               self.light_ray_solution[q]['start'] + \
                 self.light_ray_solution[q]['traversal_box_fraction'] * \
-                na.array([na.cos(phi) * na.sin(theta),
-                          na.sin(phi) * na.sin(theta),
-                          na.cos(theta)])
+                np.array([np.cos(phi) * np.sin(theta),
+                          np.sin(phi) * np.sin(theta),
+                          np.cos(theta)])
             box_fraction_used += \
               self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -365,30 +365,30 @@
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = na.array([])
+                sub_data[field] = np.array([])
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.h.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = na.concatenate([sub_data['dl'],
+                sub_data['dl'] = np.concatenate([sub_data['dl'],
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
                 for field in fields:
-                    sub_data[field] = na.concatenate([sub_data[field],
+                    sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = na.array([sub_ray['x-velocity'],
+                    sub_vel = np.array([sub_ray['x-velocity'],
                                         sub_ray['y-velocity'],
                                         sub_ray['z-velocity']])
                     sub_data['los_velocity'] = \
-                      na.concatenate([sub_data['los_velocity'],
-                                      (na.rollaxis(sub_vel, 1) *
+                      np.concatenate([sub_data['los_velocity'],
+                                      (np.rollaxis(sub_vel, 1) *
                                        line_of_sight).sum(axis=1)])
                     del sub_vel
 
@@ -470,20 +470,20 @@
         if fields is None: fields = []
 
         # Create position array from halo list.
-        halo_centers = na.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, na.array(map(lambda halo: halo[field],
+        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
+        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
                                                        halo_list))) \
                                   for field in fields])
 
-        nearest_distance = na.zeros(data['x'].shape)
-        field_data = dict([(field, na.zeros(data['x'].shape)) \
+        nearest_distance = np.zeros(data['x'].shape)
+        field_data = dict([(field, np.zeros(data['x'].shape)) \
                            for field in fields])
         for index in xrange(nearest_distance.size):
-            nearest = na.argmin(periodic_distance(na.array([data['x'][index],
+            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
                                                             data['y'][index],
                                                             data['z'][index]]),
                                                   halo_centers))
-            nearest_distance[index] = periodic_distance(na.array([data['x'][index],
+            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
                                                                   data['y'][index],
                                                                   data['z'][index]]),
                                                         halo_centers[nearest])
@@ -532,41 +532,41 @@
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
             if field in new_data:
-                new_data[field] = na.concatenate([new_data[field], datum[field]])
+                new_data[field] = np.concatenate([new_data[field], datum[field]])
             else:
-                new_data[field] = na.copy(datum[field])
+                new_data[field] = np.copy(datum[field])
     return new_data
 
 def vector_length(start, end):
     "Calculate vector length."
 
-    return na.sqrt(na.power((end - start), 2).sum())
+    return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
     "Calculate length of shortest vector between to points in periodic domain."
     dif = coord1 - coord2
 
-    dim = na.ones(coord1.shape,dtype=int)
+    dim = np.ones(coord1.shape,dtype=int)
     def periodic_bind(num):
-        pos = na.abs(num % dim)
-        neg = na.abs(num % -dim)
-        return na.min([pos,neg],axis=0)
+        pos = np.abs(num % dim)
+        neg = np.abs(num % -dim)
+        return np.min([pos,neg],axis=0)
 
     dif = periodic_bind(dif)
-    return na.sqrt((dif * dif).sum(axis=-1))
+    return np.sqrt((dif * dif).sum(axis=-1))
 
 def periodic_ray(start, end, left=None, right=None):
     "Break up periodic ray into non-periodic segments."
 
     if left is None:
-        left = na.zeros(start.shape)
+        left = np.zeros(start.shape)
     if right is None:
-        right = na.ones(start.shape)
+        right = np.ones(start.shape)
     dim = right - left
 
     vector = end - start
-    wall = na.zeros(start.shape)
-    close = na.zeros(start.shape, dtype=object)
+    wall = np.zeros(start.shape)
+    close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
     right_bound = vector > 0
@@ -574,15 +574,15 @@
     bound = vector != 0.0
 
     wall[left_bound] = left[left_bound]
-    close[left_bound] = na.max
+    close[left_bound] = np.max
     wall[right_bound] = right[right_bound]
-    close[right_bound] = na.min
-    wall[no_bound] = na.inf
-    close[no_bound] = na.min
+    close[right_bound] = np.min
+    wall[no_bound] = np.inf
+    close[no_bound] = np.min
 
     segments = []
-    this_start = na.copy(start)
-    this_end = na.copy(end)
+    this_start = np.copy(start)
+    this_end = np.copy(end)
     t = 0.0
     tolerance = 1e-6
 
@@ -596,14 +596,14 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = na.array([close[q]([this_end[q], wall[q]]) \
+        nearest = np.array([close[q]([this_end[q], wall[q]]) \
                                 for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = na.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([na.copy(this_start), na.copy(now)])
-        this_start = na.copy(now)
+        segments.append([np.copy(this_start), np.copy(now)])
+        this_start = np.copy(now)
         t += dt
 
     return segments


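The helpers at the bottom of light_ray.py are self-contained enough to
exercise directly. periodic_distance reduces each component to its minimum
image before taking the norm, so in a unit box no two points are ever more
than half a box width apart along any axis. A quick usage sketch (the sample
coordinates are illustrative):

    import numpy as np

    def periodic_distance(coord1, coord2):
        # Same logic as the helper above, for a unit periodic domain.
        dif = coord1 - coord2
        dim = np.ones(coord1.shape, dtype=int)
        pos = np.abs(dif % dim)
        neg = np.abs(dif % -dim)
        dif = np.min([pos, neg], axis=0)
        return np.sqrt((dif * dif).sum(axis=-1))

    a = np.array([0.95, 0.5, 0.5])
    b = np.array([0.05, 0.5, 0.5])
    print(periodic_distance(a, b))  # ~0.1, not 0.9: the shorter image wins
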
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -31,7 +31,7 @@
 import h5py
 import itertools
 import math
-import numpy as na
+import numpy as np
 import random
 import sys
 import os.path as path
@@ -123,13 +123,13 @@
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
         if isinstance(self, FOFHalo):
-            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
         else:
             c_vec = self.maximum_density_location() - self.pf.domain_center
         cx = (cx - c_vec[0])
         cy = (cy - c_vec[1])
         cz = (cz - c_vec[2])
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
@@ -158,7 +158,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[1:]
-        return na.array([
+        return np.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
@@ -193,7 +193,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx, vy, vz]) / pm.sum()
+        return np.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -216,8 +216,8 @@
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
         vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
         s = vx ** 2. + vy ** 2. + vz ** 2.
-        ms = na.mean(s)
-        return na.sqrt(ms) * pm.size
+        ms = np.mean(s)
+        return np.sqrt(ms) * pm.size
 
     def maximum_radius(self, center_of_mass=True):
         r"""Returns the maximum radius in the halo for all particles,
@@ -246,13 +246,13 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"] - center[0])
-        ry = na.abs(self["particle_position_y"] - center[1])
-        rz = na.abs(self["particle_position_z"] - center[2])
+        rx = np.abs(self["particle_position_x"] - center[0])
+        ry = np.abs(self["particle_position_y"] - center[1])
+        rz = np.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                + na.minimum(ry, DW[1] - ry) ** 2.0
-                + na.minimum(rz, DW[2] - rz) ** 2.0)
+        r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0
+                + np.minimum(ry, DW[1] - ry) ** 2.0
+                + np.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
@@ -393,7 +393,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
+            vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -419,8 +419,8 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        dist = na.empty(thissize, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
@@ -432,15 +432,15 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY),
             math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
-        inds = na.digitize(dist, self.radial_bins) - 1
+        inds = np.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
-            for index in na.unique(inds):
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                na.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -450,12 +450,12 @@
         (self.radial_bins * cm)**3.0)
         
     def _get_ellipsoid_parameters_basic(self):
-        na.seterr(all='ignore')
+        np.seterr(all='ignore')
         # check that there are at least 4 particles to form an ellipsoid,
         # neglecting to check whether the 4 particles are in the same
         # plane; that is almost certain never to occur, so we will deal
         # with it later if it ever comes up
-        if na.size(self["particle_position_x"]) < 4:
+        if np.size(self["particle_position_x"]) < 4:
             mylog.warning("Too few particles for ellipsoid parameters.")
             return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
@@ -466,19 +466,19 @@
 		    self["particle_position_y"],
 		    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
 	position = [position[0] - com[0],
 		    position[1] - com[1],
 		    position[2] - com[2]]
 	# different cases of particles being on other side of boundary
-	for axis in range(na.size(DW)):
-	    cases = na.array([position[axis],
+	for axis in range(np.size(DW)):
+	    cases = np.array([position[axis],
 	  		      position[axis] + DW[axis],
 			      position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
-            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+            position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
 	# find the furthest particle's index
-	r = na.sqrt(position[0]**2 +
+	r = np.sqrt(position[0]**2 +
 		    position[1]**2 +
 		    position[2]**2)
         A_index = r.argmax()
@@ -490,24 +490,24 @@
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
-        rr = na.array([position[0],
+        rr = np.array([position[0],
 		       position[1],
 		       position[2]]).T # Similar to tB_vector in old code.
-        tC_vector = na.cross(e0_vector_copy, rr)
+        tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
-            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
-        te1 = na.cross(te2, e0_vector_copy)
-        length = na.abs(-na.sum(rr * te1, axis = 1) * \
-            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = np.cross(te2, e0_vector_copy)
+        length = np.abs(-np.sum(rr * te1, axis = 1) * \
+            (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \
             mag_A**-2.)**(-0.5))
         # Apparently this problem sometimes happens: the NaNs are turned
         # into infs, which messes up the nanargmax below.
-        length[length == na.inf] = 0.
-        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        length[length == np.inf] = 0.
+        tB_index = np.nanargmax(length) # ignores NaNs created above.
         mag_B = length[tB_index]
         e1_vector = te1[tB_index]
         e2_vector = te2[tB_index]
@@ -518,24 +518,24 @@
             temp_e0[:,dim] = e0_vector[dim]
             temp_e1[:,dim] = e1_vector[dim]
             temp_e2[:,dim] = e2_vector[dim]
-        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
-            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
-            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
-        length[length == na.inf] = 0.
-        tC_index = na.nanargmax(length)
+        length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \
+            np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == np.inf] = 0.
+        tC_index = np.nanargmax(length)
         mag_C = length[tC_index]
         # tilt is calculated from the rotation about x axis
         # needed to align e1 vector with the y axis
         # after e0 is aligned with x axis
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        t1 = np.arctan(e0_vector[1] / e0_vector[0])
         RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
         r1 = (e0_vector * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
-        r2 = na.dot(RY, na.dot(RZ, e1_vector))
-        tilt = na.arctan(r2[2]/r2[1])
+        r2 = np.dot(RY, np.dot(RZ, e1_vector))
+        tilt = np.arctan(r2[2]/r2[1])
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
@@ -572,11 +572,11 @@
 
         #Halo.__init__(self,halo_list,index,
         self.size=Np 
-        self.CoM=na.array([X,Y,Z])
+        self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
         self.group_total_mass=-1
         self.max_radius=Rvir
-        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.bulk_vel=np.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
         self.group_total_mass = -1 #not implemented 
     
@@ -651,7 +651,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -704,7 +704,7 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
@@ -716,7 +716,7 @@
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
-            dist = na.empty(self.indices.size, dtype='float64')
+            dist = np.empty(self.indices.size, dtype='float64')
             mark = 0
             # Find the distances to the particles.
             # I don't like this much, but I
@@ -737,15 +737,15 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+        self.radial_bins = np.logspace(math.log10(dist_min * .99 + TINY),
             math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
-            inds = na.digitize(dist, self.radial_bins) - 1
-            for index in na.unique(inds):
+            inds = np.digitize(dist, self.radial_bins) - 1
+            for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    na.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -831,7 +831,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -871,7 +871,7 @@
                     # The result of searchsorted is an array with the positions
                     # of the indexes in pid as they are in sp_pid. This is
                     # because each element of pid is in sp_pid only once.
-                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                    self.particle_mask = np.searchsorted(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -890,9 +890,9 @@
             return None
         elif field == 'particle_index' or field == 'particle_type':
             # the only integer fields
-            field_data = na.empty(size, dtype='int64')
+            field_data = np.empty(size, dtype='int64')
         else:
-            field_data = na.empty(size, dtype='float64')
+            field_data = np.empty(size, dtype='float64')
         f.close()
         # Apparently, there's a bug in h5py that was keeping the file pointer
         # f closed, even though it's re-opened below. This del seems to fix
@@ -943,7 +943,7 @@
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]
         updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[2], np.array([basic_parameters[3],
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
@@ -1025,7 +1025,7 @@
         self.tilt = tilt
         self.bin_count = None
         self.overdensity = None
-        self.indices = na.array([])  # Never used for a LoadedHalo.
+        self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
@@ -1084,7 +1084,7 @@
                 self.particle_fields[field] = \
                     self._data_source[field][ii].astype('float64')
             del self._data_source[field]
-        self._base_indices = na.arange(tot_part)[ii]
+        self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
     def _get_dm_indices(self):
@@ -1099,10 +1099,10 @@
             return slice(None)
 
     def _parse_output(self):
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags + 1)
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount(self.tags + 1)
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
@@ -1112,7 +1112,7 @@
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
-            md_i = na.argmax(dens[cp:cp_c])
+            md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
@@ -1201,7 +1201,7 @@
         """
         # Set up a vector to multiply other
         # vectors by to project along proj_dim
-        vec = na.array([1., 1., 1.])
+        vec = np.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1367,9 +1367,9 @@
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
             if 'nan' not in num:
-                formats += na.array(eval(num)).dtype,
+                formats += np.array(eval(num)).dtype,
             else:
-                formats += na.dtype('float'),
+                formats += np.dtype('float'),
         assert len(formats) == len(names)
 
         #Jc = 1.98892e33/pf['mpchcm']*1e5
@@ -1384,7 +1384,7 @@
                     Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
-        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
         #convert position units  
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
@@ -1470,7 +1470,7 @@
                self.particle_fields["particle_position_y"] / self.period[1],
                self.particle_fields["particle_position_z"] / self.period[2],
                self.link)
-        self.densities = na.ones(self.tags.size, dtype='float64') * -1
+        self.densities = np.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
@@ -1518,12 +1518,12 @@
             size = int(line[2])
             fnames = locations[halo]
             # Everything else
-            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
-            max_dens_point = na.array([float(line[3]), float(line[4]),
+            CoM = np.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = np.array([float(line[3]), float(line[4]),
                 float(line[5]), float(line[6])])
             group_total_mass = float(line[1])
             max_radius = float(line[13])
-            bulk_vel = na.array([float(line[10]), float(line[11]),
+            bulk_vel = np.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
             if len(line) == 15:
@@ -1541,7 +1541,7 @@
                 e1_vec0 = float(line[18])
                 e1_vec1 = float(line[19])
                 e1_vec2 = float(line[20])
-                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                e1_vec = np.array([e1_vec0, e1_vec1, e1_vec2])
                 tilt = float(line[21])
                 self._groups.append(LoadedHalo(self.pf, halo, size = size,
                     CoM = CoM,
@@ -1596,7 +1596,7 @@
             y = float(line[columns['y']])
             z = float(line[columns['z']])
             r = float(line[columns['r']])
-            cen = na.array([x, y, z])
+            cen = np.array([x, y, z])
             # Now we see if there's anything else.
             if extra:
                 temp_dict = {}
@@ -1631,7 +1631,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.] * 3)
+        self.period = np.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1645,20 +1645,20 @@
         if (self.particle_fields["particle_index"] < 0).any():
             mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
             exit = True
-        if na.unique(self.particle_fields["particle_index"]).size != \
+        if np.unique(self.particle_fields["particle_index"]).size != \
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
             self.particle_fields['ParticleMassMsun'])
-        na.divide(self.particle_fields["particle_position_x"],
+        np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
-        na.divide(self.particle_fields["particle_position_y"],
+        np.divide(self.particle_fields["particle_position_y"],
             self.old_period[1], self.particle_fields["particle_position_y"])
-        na.divide(self.particle_fields["particle_position_z"],
+        np.divide(self.particle_fields["particle_position_z"],
             self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
@@ -1688,20 +1688,20 @@
         self.period = self.old_period.copy()
         # Precompute the bulk velocity in parallel.
         yt_counters("Precomp bulk vel.")
-        self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
+        self.bulk_vel = np.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
         pm = obj.mass
         # Fix this back to un-normalized units.
-        na.multiply(pm, self.total_mass, pm)
+        np.multiply(pm, self.total_mass, pm)
         xv = self._data_source["particle_velocity_x"][self._base_indices]
         yv = self._data_source["particle_velocity_y"][self._base_indices]
         zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
-        calc = len(na.where(select == True)[0])
+        calc = len(np.where(select == True)[0])
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             ms = pm[select]
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
@@ -1710,13 +1710,13 @@
             sort = subchain.argsort()
             vel = vel[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
+                self.bulk_vel[u] = np.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
@@ -1729,27 +1729,27 @@
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
+        rms_vel_temp = np.zeros((self.group_count, 2), dtype='float64')
         if calc:
-            vel = na.empty((calc, 3), dtype='float64')
+            vel = np.empty((calc, 3), dtype='float64')
             vel[:, 0] = xv[select] * ms
             vel[:, 1] = yv[select] * ms
             vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                rms_vel_temp[u][0] = np.sum(((vel[marks[i]:marks[i + 1]] - \
                     self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
                 rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
-        self.rms_vel = na.empty(self.group_count, dtype='float64')
+        self.rms_vel = np.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
             self.rms_vel[groupID] = \
-                na.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
+                np.sqrt(rms_vel_temp[groupID][0] / rms_vel_temp[groupID][1]) * \
                 self.group_sizes[groupID]
         del rms_vel_temp
         yt_counters("rms vel computing")
@@ -1764,16 +1764,16 @@
         """
         Each task will make an entry for all groups, but it may be empty.
         """
-        unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags + 1).tolist())
-        sort_indices = na.argsort(self.tags)
-        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        unique_ids = np.unique(self.tags)
+        counts = np.bincount((self.tags + 1).tolist())
+        sort_indices = np.argsort(self.tags)
+        grab_indices = np.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
         cp = 0
         index = 0
         # We want arrays for parallel HOP
-        self._groups = na.empty(self.group_count, dtype='object')
-        self._max_dens = na.empty((self.group_count, 4), dtype='float64')
+        self._groups = np.empty(self.group_count, dtype='object')
+        self._max_dens = np.empty((self.group_count, 4), dtype='float64')
         if self.group_count == 0:
             mylog.info("There are no halos found.")
             return
@@ -1861,7 +1861,7 @@
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
+        self.center = (np.array(ds.right_edge) + np.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1871,7 +1871,7 @@
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
             # if the most dense particle is in the box, keep it
-            if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
+            if np.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
                 # We need to mock up the HOPHaloList thingie, so we need to
@@ -2128,8 +2128,8 @@
         >>> halos = parallelHF(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding=0.0)
@@ -2141,7 +2141,7 @@
         if self.tree != 'F' and self.tree != 'C':
             mylog.error("No kD Tree specified!")
         period = pf.domain_right_edge - pf.domain_left_edge
-        topbounds = na.array([[0., 0., 0.], period])
+        topbounds = np.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2190,14 +2190,14 @@
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
-            self.padding = (na.ones(3, dtype='float64') * padding,
-                na.ones(3, dtype='float64') * padding)
+            self.padding = (np.ones(3, dtype='float64') * padding,
+                np.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding = na.empty(3, dtype='float64')
-            RE_padding = na.empty(3, dtype='float64')
+            LE_padding = np.empty(3, dtype='float64')
+            RE_padding = np.empty(3, dtype='float64')
             avg_spacing = (float(vol) / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
@@ -2215,9 +2215,9 @@
                     self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
                     self._data_source.left_edge[dim]
-                counts, bins = na.histogram(data, bins)
+                counts, bins = np.histogram(data, bins)
                 # left side.
                 start = 0
                 count = counts[0]
@@ -2250,8 +2250,8 @@
             total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3, dtype='float64'),
-                na.zeros(3, dtype='float64'))
+            self.padding = (np.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
@@ -2282,8 +2282,8 @@
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
-        my_points = na.empty((n_random, 3), dtype='float64')
-        uni = na.array(random.sample(xrange(xp.size), n_random))
+        my_points = np.empty((n_random, 3), dtype='float64')
+        uni = np.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
         my_points[:, 0] = xp[uni]
         del xp
@@ -2297,10 +2297,10 @@
         mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
-            root_points = na.empty((tot_random, 3), dtype='float64')
+            root_points = np.empty((tot_random, 3), dtype='float64')
             root_points.shape = (1, 3 * tot_random)
         else:
-            root_points = na.empty([])
+            root_points = np.empty([])
         my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
@@ -2315,9 +2315,9 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+        bins = np.arange(num_bins + 1, dtype='float64') * bin_width + \
             bounds[0][dim]
-        counts, bins = na.histogram(points[:, dim], bins)
+        counts, bins = np.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
@@ -2341,7 +2341,7 @@
         subpoints = []
         subbounds = []
         for pair in zip(midpoints[:-1], midpoints[1:]):
-            select = na.bitwise_and(points[:, dim] >= pair[0],
+            select = np.bitwise_and(points[:, dim] >= pair[0],
                 points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
@@ -2363,7 +2363,7 @@
         ms = -self.Tot_M.copy()
         del self.Tot_M
         Cx = self.CoM[:, 0].copy()
-        sorted = na.lexsort([Cx, ms])
+        sorted = np.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
         self._max_dens = self._max_dens[sorted]
@@ -2426,8 +2426,8 @@
         >>> halos = HaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
@@ -2520,8 +2520,8 @@
         >>> halos = FOFHaloFinder(pf)
         """
         if subvolume is not None:
-            ds_LE = na.array(subvolume.left_edge)
-            ds_RE = na.array(subvolume.right_edge)
+            ds_LE = np.array(subvolume.left_edge)
+            ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
@@ -2544,7 +2544,7 @@
             avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
-            linking_length = na.abs(link)
+            linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,


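Several of the halo_objects.py methods above rely on the same minimum-image
trick: per-axis distances are taken as the smaller of the direct separation
and the wrap-around separation. A compact sketch of the maximum_radius
computation, vectorized over particles (DW, center, and pos are toy values,
not yt API):

    import numpy as np

    DW = np.array([1.0, 1.0, 1.0])      # periodic domain width per axis
    center = np.array([0.9, 0.1, 0.5])  # halo center
    pos = np.array([[0.05, 0.15, 0.5],  # near the center, across the x edge
                    [0.50, 0.50, 0.5]]) # genuinely distant from the center

    r = np.abs(pos - center)
    r = np.minimum(r, DW - r)  # fold each axis onto its shorter image
    radius = np.sqrt((r ** 2).sum(axis=1)).max()
    print(radius)  # ~0.566: set by the distant particle, not the wrapped one

This matches the per-axis np.minimum(rx, DW[0] - rx) terms in
Halo.maximum_radius.
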
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -25,7 +25,7 @@
 
 from collections import defaultdict
 import itertools, sys
-import numpy as na
+import numpy as np
 import gc
 
 from yt.funcs import *
@@ -88,23 +88,23 @@
         for taskID in global_bounds:
             thisLE, thisRE = global_bounds[taskID]
             if self.mine != taskID:
-                vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
-                vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2], taskID]))
+                vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2], taskID]))
             if self.mine == taskID:
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisLE[0], thisRE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisLE[1], thisRE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisLE[2]]))
-                my_vertices.append(na.array([thisRE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisLE[0], thisRE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisLE[1], thisRE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisLE[2]]))
+                my_vertices.append(np.array([thisRE[0], thisRE[1], thisRE[2]]))
         # Find the neighbors we share corners with. Yes, this is lazy with
         # a double loop, but it works and this is definitely not a performance
         # bottleneck.
@@ -119,13 +119,13 @@
                 # Also test to see if the distance to this corner is within
                 # max_padding, which is more likely the case with load-balancing
                 # turned on.
-                dx = min( na.fabs(my_vertex[0] - vertex[0]), \
-                    self.period[0] - na.fabs(my_vertex[0] - vertex[0]))
-                dy = min( na.fabs(my_vertex[1] - vertex[1]), \
-                    self.period[1] - na.fabs(my_vertex[1] - vertex[1]))
-                dz = min( na.fabs(my_vertex[2] - vertex[2]), \
-                    self.period[2] - na.fabs(my_vertex[2] - vertex[2]))
-                d = na.sqrt(dx*dx + dy*dy + dz*dz)
+                dx = min( np.fabs(my_vertex[0] - vertex[0]), \
+                    self.period[0] - np.fabs(my_vertex[0] - vertex[0]))
+                dy = min( np.fabs(my_vertex[1] - vertex[1]), \
+                    self.period[1] - np.fabs(my_vertex[1] - vertex[1]))
+                dz = min( np.fabs(my_vertex[2] - vertex[2]), \
+                    self.period[2] - np.fabs(my_vertex[2] - vertex[2]))
+                d = np.sqrt(dx*dx + dy*dy + dz*dz)
                 if d <= self.max_padding:
                     self.neighbors.add(int(vertex[3]))
         # Faces and edges.
@@ -219,13 +219,13 @@
         annulus data.
         """
         if round == 'first':
-            max_pad = na.max(self.padding)
+            max_pad = np.max(self.padding)
             self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
             for neighbor in self.neighbors:
-                self.max_padding = na.maximum(self.global_padding[neighbor], \
+                self.max_padding = np.maximum(self.global_padding[neighbor], \
                     self.max_padding)
 
     def _communicate_padding_data(self):
@@ -247,7 +247,7 @@
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
         send_count = self.is_inside_annulus.sum()
-        points = na.empty((send_count, 3), dtype='float64')
+        points = np.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
         points[:,2] = self.zpos[self.is_inside_annulus]
@@ -280,9 +280,9 @@
         recv_size = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_points[opp_neighbor] = na.empty((opp_size, 3), dtype='float64')
-            recv_mass[opp_neighbor] = na.empty(opp_size, dtype='float64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_points[opp_neighbor] = np.empty((opp_size, 3), dtype='float64')
+            recv_mass[opp_neighbor] = np.empty(opp_size, dtype='float64')
             recv_size += opp_size
         yt_counters("Initalizing recv arrays.")
         # Setup the receiving slots.
@@ -306,11 +306,11 @@
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
         # Now we add the data to ourselves.
-        self.index_pad = na.empty(recv_size, dtype='int64')
-        self.xpos_pad = na.empty(recv_size, dtype='float64')
-        self.ypos_pad = na.empty(recv_size, dtype='float64')
-        self.zpos_pad = na.empty(recv_size, dtype='float64')
-        self.mass_pad = na.empty(recv_size, dtype='float64')
+        self.index_pad = np.empty(recv_size, dtype='int64')
+        self.xpos_pad = np.empty(recv_size, dtype='float64')
+        self.ypos_pad = np.empty(recv_size, dtype='float64')
+        self.zpos_pad = np.empty(recv_size, dtype='float64')
+        self.mass_pad = np.empty(recv_size, dtype='float64')
         so_far = 0
         for opp_neighbor in self.neighbors:
             opp_size = global_send_count[opp_neighbor][self.mine]
@@ -335,7 +335,7 @@
         yt_counters("Flipping coordinates around the periodic boundary.")
         self.size = self.index.size + self.index_pad.size
         # Now that we have the full size, initialize the chainID array
-        self.chainID = na.ones(self.size,dtype='int64') * -1
+        self.chainID = np.ones(self.size,dtype='int64') * -1
         # Clean up explicitly, but these should be empty dicts by now.
         del recv_real_indices, hooks, recv_points, recv_mass
         yt_counters("Communicate discriminated padding")
@@ -348,10 +348,10 @@
         if self.tree == 'F':
             # Yes, we really do need to initialize this many arrays.
             # They're deleted in _parallelHOP.
-            fKD.dens = na.zeros(self.size, dtype='float64', order='F')
-            fKD.mass = na.concatenate((self.mass, self.mass_pad))
+            fKD.dens = np.zeros(self.size, dtype='float64', order='F')
+            fKD.mass = np.concatenate((self.mass, self.mass_pad))
             del self.mass
-            fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+            fKD.pos = np.empty((3, self.size), dtype='float64', order='F')
             # This actually copies the data into the fortran space.
             self.psize = self.xpos.size
             fKD.pos[0, :self.psize] = self.xpos
@@ -364,7 +364,7 @@
             fKD.pos[2, self.psize:] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+            fKD.qv = np.asfortranarray(np.empty(3, dtype='float64'))
             fKD.nn = self.num_neighbors
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
@@ -375,8 +375,8 @@
             # Now call the fortran.
             create_tree(0)
         elif self.tree == 'C':
-            self.mass = na.concatenate((self.mass, self.mass_pad))
-            self.pos = na.empty((self.size, 3), dtype='float64')
+            self.mass = np.concatenate((self.mass, self.mass_pad))
+            self.pos = np.empty((self.size, 3), dtype='float64')
             self.psize = self.xpos.size
             self.pos[:self.psize, 0] = self.xpos
             self.pos[:self.psize, 1] = self.ypos
@@ -407,7 +407,7 @@
         # Test to see if the points are in the 'real' region
         (LE, RE) = self.bounds
         if round == 'first':
-            points = na.empty((self.real_size, 3), dtype='float64')
+            points = np.empty((self.real_size, 3), dtype='float64')
             points[:,0] = self.xpos
             points[:,1] = self.ypos
             points[:,2] = self.zpos
@@ -426,21 +426,21 @@
         temp_LE = LE + self.max_padding
         temp_RE = RE - self.max_padding
         if round == 'first':
-            inner = na.invert( (points >= temp_LE).all(axis=1) * \
+            inner = np.invert( (points >= temp_LE).all(axis=1) * \
                 (points < temp_RE).all(axis=1) )
         elif round == 'second' or round == 'third':
             if self.tree == 'F':
-                inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+                inner = np.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
                     (fKD.pos.T < temp_RE).all(axis=1) )
             elif self.tree == 'C':
-                inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+                inner = np.invert( (self.pos >= temp_LE).all(axis=1) * \
                     (self.pos < temp_RE).all(axis=1) )
         if round == 'first':
             del points
         # After inverting the logic above, we want points that are both
         # inside the real region, but within one padding of the boundary,
         # and this will do it.
-        self.is_inside_annulus = na.bitwise_and(self.is_inside, inner)
+        self.is_inside_annulus = np.bitwise_and(self.is_inside, inner)
         del inner
         # Below we make a mapping of real particle index->local ID
         # Unf. this has to be a dict, because any task can have
@@ -449,10 +449,10 @@
         # as the full number of particles.
         # We can skip this the first two times around.
         if round == 'third':
-            temp = na.arange(self.size)
-            my_part = na.bitwise_or(na.invert(self.is_inside), self.is_inside_annulus)
-            my_part = na.bitwise_and(my_part, (self.chainID != -1))
-            catted_indices = na.concatenate(
+            temp = np.arange(self.size)
+            my_part = np.bitwise_or(np.invert(self.is_inside), self.is_inside_annulus)
+            my_part = np.bitwise_and(my_part, (self.chainID != -1))
+            catted_indices = np.concatenate(
                 (self.index, self.index_pad))[my_part]
             self.rev_index = dict.fromkeys(catted_indices)
             self.rev_index.update(itertools.izip(catted_indices, temp[my_part]))
@@ -468,11 +468,11 @@
         keeping all of this data, just using it.
         """
         yt_counters("densestNN")
-        self.densestNN = na.empty(self.size,dtype='int64')
+        self.densestNN = np.empty(self.size,dtype='int64')
         # We find nearest neighbors in chunks.
         chunksize = 10000
         if self.tree == 'F':
-            fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+            fKD.chunk_tags = np.asfortranarray(np.empty((self.num_neighbors, chunksize), dtype='int64'))
             start = 1 # Fortran counting!
             finish = 0
             while finish < self.size:
@@ -486,8 +486,8 @@
                 chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
                 # Find the densest nearest neighbors by referencing the already
                 # calculated density.
-                n_dens = na.take(self.density,chunk_NNtags)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density,chunk_NNtags)
+                max_loc = np.argmax(n_dens,axis=1)
                 for i in xrange(finish - start + 1): # +1 for fortran counting.
                     j = start + i - 1 # -1 for fortran counting.
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -502,9 +502,8 @@
                 # be as memory efficient - fragmenting?
                 chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
                     finish, num_neighbors=self.num_neighbors)
-                n_dens = na.take(self.density, chunk_NNtags)
-                max_loc = na.argmax(n_dens, axis=1)
-                max_loc = na.argmax(n_dens,axis=1)
+                n_dens = np.take(self.density, chunk_NNtags)
+                max_loc = np.argmax(n_dens, axis=1)
                 for i in xrange(finish - start):
                     j = start + i
                     self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
@@ -520,8 +520,8 @@
         """
         yt_counters("build_chains")
         chainIDmax = 0
-        self.densest_in_chain = na.ones(10000, dtype='float64') * -1 # chainID->density, one to one
-        self.densest_in_chain_real_index = na.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
+        self.densest_in_chain = np.ones(10000, dtype='float64') * -1 # chainID->density, one to one
+        self.densest_in_chain_real_index = np.ones(10000, dtype='int64') * -1 # chainID->real_index, one to one
         for i in xrange(int(self.size)):
             # If it's already in a group, move on, or if this particle is
             # in the padding, move on because chains can only terminate in
@@ -536,7 +536,7 @@
             # in the next loop.
             if chainIDnew == chainIDmax:
                 chainIDmax += 1
-        self.padded_particles = na.array(self.padded_particles, dtype='int64')
+        self.padded_particles = np.array(self.padded_particles, dtype='int64')
         self.densest_in_chain = self.__clean_up_array(self.densest_in_chain)
         self.densest_in_chain_real_index = self.__clean_up_array(self.densest_in_chain_real_index)
         yt_counters("build_chains")
@@ -598,9 +598,9 @@
         yt_counters("preconnect_chains")
         yt_counters("local chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] = na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -626,8 +626,8 @@
         elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
             for i in xrange(self.size):
@@ -685,7 +685,7 @@
         # link is to itself. At that point we've found the densest chain
         # in this set of sets and we keep a record of that.
         yt_counters("preconnect pregrouping.")
-        final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
+        final_chain_map = np.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
         for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
@@ -701,9 +701,9 @@
                 self.chainID[i] = final_chain_map[self.chainID[i]]
         del final_chain_map
         # Now make the chainID assignments consecutive.
-        map = na.empty(self.densest_in_chain.size, dtype='int64')
-        dic_new = na.empty(chain_count - removed, dtype='float64')
-        dicri_new = na.empty(chain_count - removed, dtype='int64')
+        map = np.empty(self.densest_in_chain.size, dtype='int64')
+        dic_new = np.empty(chain_count - removed, dtype='float64')
+        dicri_new = np.empty(chain_count - removed, dtype='int64')
         new = 0
         for i,dic in enumerate(self.densest_in_chain):
             if dic > 0:
@@ -763,9 +763,9 @@
         mylog.info("Sorting chains...")
         yt_counters("global chain sorting.")
         sort = self.densest_in_chain.argsort()
-        sort = na.flipud(sort)
-        map = na.empty(sort.size,dtype='int64')
-        map[sort] =na.arange(sort.size)
+        sort = np.flipud(sort)
+        map = np.empty(sort.size,dtype='int64')
+        map[sort] = np.arange(sort.size)
         self.densest_in_chain = self.densest_in_chain[sort]
         self.densest_in_chain_real_index = self.densest_in_chain_real_index[sort]
         del sort
@@ -779,14 +779,14 @@
         mylog.info("Pre-linking chains 'by hand'...")
         yt_counters("global chain hand-linking.")
         # If there are no repeats, we can skip this mess entirely.
-        uniq = na.unique(self.densest_in_chain_real_index)
+        uniq = np.unique(self.densest_in_chain_real_index)
         if uniq.size != self.densest_in_chain_real_index.size:
             # Find only the real particle indices that are repeated to reduce
             # the dict workload below.
             dicri = self.densest_in_chain_real_index[self.densest_in_chain_real_index.argsort()]
-            diff = na.ediff1d(dicri)
+            diff = np.ediff1d(dicri)
             diff = (diff == 0) # Picks out the places where the ids are equal
-            diff = na.concatenate((diff, [False])) # Makes it the same length
+            diff = np.concatenate((diff, [False])) # Makes it the same length
             # This has only the repeated IDs. Sets are faster at searches than
             # arrays.
             dicri = set(dicri[diff])
@@ -837,11 +837,11 @@
         for opp_neighbor in self.neighbors:
             opp_size = self.global_padded_count[opp_neighbor]
             to_recv_count += opp_size
-            temp_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            temp_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            temp_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            temp_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # The arrays we'll actually keep around...
-        self.recv_real_indices = na.empty(to_recv_count, dtype='int64')
-        self.recv_chainIDs = na.empty(to_recv_count, dtype='int64')
+        self.recv_real_indices = np.empty(to_recv_count, dtype='int64')
+        self.recv_chainIDs = np.empty(to_recv_count, dtype='int64')
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -899,9 +899,9 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
+        chainID_translate_map_local = np.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
-        self.uphill_real_indices = na.concatenate((
+        self.uphill_real_indices = np.concatenate((
             self.index, self.index_pad))[self.padded_particles]
         self.uphill_chainIDs = self.chainID[self.padded_particles]
         del self.padded_particles
@@ -991,7 +991,7 @@
         """
         yt_counters("communicate_annulus_chainIDs")
         # Pick the particles in the annulus.
-        real_indices = na.concatenate(
+        real_indices = np.concatenate(
             (self.index, self.index_pad))[self.is_inside_annulus]
         chainIDs = self.chainID[self.is_inside_annulus]
         # We're done with this here.
@@ -1012,8 +1012,8 @@
         recv_chainIDs = dict.fromkeys(self.neighbors)
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
-            recv_real_indices[opp_neighbor] = na.empty(opp_size, dtype='int64')
-            recv_chainIDs[opp_neighbor] = na.empty(opp_size, dtype='int64')
+            recv_real_indices[opp_neighbor] = np.empty(opp_size, dtype='int64')
+            recv_chainIDs[opp_neighbor] = np.empty(opp_size, dtype='int64')
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
@@ -1062,8 +1062,8 @@
         # Plus 2 because we're looking for that neighbor, but only keeping 
         # nMerge + 1 neighbor tags, skipping ourselves.
         if self.tree == 'F':
-            fKD.dist = na.empty(self.nMerge+2, dtype='float64')
-            fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+            fKD.dist = np.empty(self.nMerge+2, dtype='float64')
+            fKD.tags = np.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge+2
         elif self.tree == 'C':
@@ -1160,9 +1160,9 @@
                 top_keys.append(top_key)
                 bot_keys.append(bot_key)
                 vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
+        top_keys = np.array(top_keys, dtype='int64')
+        bot_keys = np.array(bot_keys, dtype='int64')
+        vals = np.array(vals, dtype='float64')
 
         data.clear()
 
@@ -1179,14 +1179,14 @@
         # We need to find out which pairs of self.top_keys, self.bot_keys are
         # both < self.peakthresh, and create arrays that will store this
         # relationship.
-        both = na.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
+        both = np.bitwise_and((self.densest_in_chain[self.top_keys] < self.peakthresh),
             (self.densest_in_chain[self.bot_keys] < self.peakthresh))
         g_high = self.top_keys[both]
         g_low = self.bot_keys[both]
         g_dens = self.vals[both]
         del both
-        self.reverse_map = na.ones(self.densest_in_chain.size) * -1
-        densestbound = na.ones(self.densest_in_chain.size) * -1.0
+        self.reverse_map = np.ones(self.densest_in_chain.size) * -1
+        densestbound = np.ones(self.densest_in_chain.size) * -1.0
         for i, gl in enumerate(g_low):
             if g_dens[i] > densestbound[gl]:
                 densestbound[gl] = g_dens[i]
@@ -1200,7 +1200,7 @@
             if self.densest_in_chain[chainID] >= self.peakthresh:
                 self.reverse_map[chainID] = groupID
                 groupID += 1
-        group_equivalancy_map = na.empty(groupID, dtype='object')
+        group_equivalancy_map = np.empty(groupID, dtype='object')
         for i in xrange(groupID):
             group_equivalancy_map[i] = set([])
         # Loop over all of the chain linkages.
@@ -1259,7 +1259,7 @@
         # Shack.'
         Set_list = []
         # We only want the holes that are modulo mine.
-        keys = na.arange(groupID, dtype='int64')
+        keys = np.arange(groupID, dtype='int64')
         size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
@@ -1298,7 +1298,7 @@
         del group_equivalancy_map, final_set, keys, select, groupIDs, current_sets
         del mine_groupIDs, not_mine_groupIDs, new_set, to_add_set, liter
         # Convert this list of sets into a look-up table
-        lookup = na.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
+        lookup = np.ones(self.densest_in_chain.size, dtype='int64') * (self.densest_in_chain.size + 2)
         for i,item in enumerate(Set_list):
             item_min = min(item)
             for groupID in item:
@@ -1353,7 +1353,7 @@
             # There are no groups, probably.
             pass
         # Make a secondary map to make the IDs consecutive.
-        values = na.arange(len(temp))
+        values = np.arange(len(temp))
         secondary_map = dict(itertools.izip(temp, values))
         del values
         # Update reverse_map
@@ -1386,8 +1386,8 @@
                 self.chainID[i] = -1
         del self.is_inside
         # Create a densest_in_group, analogous to densest_in_chain.
-        keys = na.arange(group_count)
-        vals = na.zeros(group_count)
+        keys = np.arange(group_count)
+        vals = np.zeros(group_count)
         self.densest_in_group = dict(itertools.izip(keys,vals))
         self.densest_in_group_real_index = self.densest_in_group.copy()
         del keys, vals
@@ -1409,12 +1409,12 @@
         velocity, to save time in HaloFinding.py (fewer barriers!).
         """
         select = (self.chainID != -1)
-        calc = len(na.where(select == True)[0])
-        loc = na.empty((calc, 3), dtype='float64')
+        calc = len(np.where(select == True)[0])
+        loc = np.empty((calc, 3), dtype='float64')
         if self.tree == 'F':
-            loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
-            loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
-            loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+            loc[:, 0] = np.concatenate((self.xpos, self.xpos_pad))[select]
+            loc[:, 1] = np.concatenate((self.ypos, self.ypos_pad))[select]
+            loc[:, 2] = np.concatenate((self.zpos, self.zpos_pad))[select]
             self.__max_memory()
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
         elif self.tree == 'C':
@@ -1424,15 +1424,15 @@
         # I think this will be faster than several vector operations that need
         # to pull the entire chainID array out of memory several times.
         yt_counters("max dens point")
-        max_dens_point = na.zeros((self.group_count,4),dtype='float64')
-        for i,part in enumerate(na.arange(self.size)[select]):
+        max_dens_point = np.zeros((self.group_count,4),dtype='float64')
+        for i,part in enumerate(np.arange(self.size)[select]):
             groupID = self.chainID[part]
             if part < self.real_size:
                 real_index = self.index[part]
             else:
                 real_index = self.index_pad[part - self.real_size]
             if real_index == self.densest_in_group_real_index[groupID]:
-                max_dens_point[groupID] = na.array([self.density[part], \
+                max_dens_point[groupID] = np.array([self.density[part], \
                 loc[i, 0], loc[i, 1], loc[i, 2]])
         del self.index, self.index_pad, self.densest_in_group_real_index
         # Now we broadcast this, effectively, with an allsum. Even though
@@ -1443,25 +1443,25 @@
         yt_counters("max dens point")
         # Now CoM.
         yt_counters("CoM")
-        CoM_M = na.zeros((self.group_count,3),dtype='float64')
-        Tot_M = na.zeros(self.group_count, dtype='float64')
-        #c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
+        CoM_M = np.zeros((self.group_count,3),dtype='float64')
+        Tot_M = np.zeros(self.group_count, dtype='float64')
+        #c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
         if calc:
-            c_vec = self.max_dens_point[:,1:4][subchain] - na.array([0.5,0.5,0.5])
-            size = na.bincount(self.chainID[select]).astype('int64')
+            c_vec = self.max_dens_point[:,1:4][subchain] - np.array([0.5,0.5,0.5])
+            size = np.bincount(self.chainID[select]).astype('int64')
         else:
             # This task has no particles in groups!
-            size = na.zeros(self.group_count, dtype='int64')
+            size = np.zeros(self.group_count, dtype='int64')
         # In case this task doesn't have all the groups, add trailing zeros.
         if size.size != self.group_count:
-            size = na.concatenate((size, na.zeros(self.group_count - size.size, dtype='int64')))
+            size = np.concatenate((size, np.zeros(self.group_count - size.size, dtype='int64')))
         if calc:
             cc = loc - c_vec
-            cc = cc - na.floor(cc)
-            ms = na.concatenate((self.mass, self.mass_pad))[select]
+            cc = cc - np.floor(cc)
+            ms = np.concatenate((self.mass, self.mass_pad))[select]
             # Most of the time, the masses will be all the same, and we can try
             # to save some effort.
-            ms_u = na.unique(ms)
+            ms_u = np.unique(ms)
             if ms_u.size == 1:
                 single = True
                 Tot_M = size.astype('float64') * ms_u
@@ -1475,13 +1475,13 @@
             sort = subchain.argsort()
             cc = cc[sort]
             sort_subchain = subchain[sort]
-            uniq_subchain = na.unique(sort_subchain)
-            diff_subchain = na.ediff1d(sort_subchain)
+            uniq_subchain = np.unique(sort_subchain)
+            diff_subchain = np.ediff1d(sort_subchain)
             marks = (diff_subchain > 0)
-            marks = na.arange(calc)[marks] + 1
-            marks = na.concatenate(([0], marks, [calc]))
+            marks = np.arange(calc)[marks] + 1
+            marks = np.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                CoM_M[u] = na.sum(cc[marks[i]:marks[i+1]], axis=0)
+                CoM_M[u] = np.sum(cc[marks[i]:marks[i+1]], axis=0)
             if not single:
                 for i,groupID in enumerate(subchain):
                     Tot_M[groupID] += ms[i]
@@ -1490,31 +1490,31 @@
                 # Don't divide by zero.
                 if groupID in self.I_own:
                     CoM_M[groupID] /= Tot_M[groupID]
-                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
+                    CoM_M[groupID] += self.max_dens_point[groupID,1:4] - np.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
         self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
         CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
         self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
-        self.CoM = na.empty((self.group_count,3), dtype='float64')
+        self.CoM = np.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
         yt_counters("CoM")
         self.__max_memory()
         # Now we find the maximum radius for all groups.
         yt_counters("max radius")
-        max_radius = na.zeros(self.group_count, dtype='float64')
+        max_radius = np.zeros(self.group_count, dtype='float64')
         if calc:
             com = self.CoM[subchain]
-            rad = na.fabs(com - loc)
-            dist = (na.minimum(rad, self.period - rad)**2.).sum(axis=1)
+            rad = np.fabs(com - loc)
+            dist = (np.minimum(rad, self.period - rad)**2.).sum(axis=1)
             dist = dist[sort]
             for i, u in enumerate(uniq_subchain):
-                max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
+                max_radius[u] = np.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
         self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
-        self.max_radius = na.sqrt(self.max_radius)
+        self.max_radius = np.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
         self.__max_memory()
@@ -1558,7 +1558,7 @@
         chain_count = self._build_chains()
         # This array tracks whether or not relationships for this particle
         # need to be examined twice, in preconnect_chains and in connect_chains
-        self.search_again = na.ones(self.size, dtype='bool')
+        self.search_again = np.ones(self.size, dtype='bool')
         if self.premerge:
             chain_count = self._preconnect_chains(chain_count)
         mylog.info('Globally assigning chainIDs...')
@@ -1625,7 +1625,7 @@
         try:
             arr[key] = value
         except IndexError:
-            arr = na.concatenate((arr, na.ones(10000, dtype=type)*-1))
+            arr = np.concatenate((arr, np.ones(10000, dtype=type)*-1))
             arr[key] = value
         return arr
     


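The center-of-mass hunks above accumulate per-group totals without a
dictionary: sort the per-particle values by group ID, mark the boundaries
between runs of equal IDs with ediff1d, then sum each contiguous slice.
A minimal standalone sketch of that pattern (made-up arrays, not yt data
structures):

    import numpy as np

    subchain = np.array([2, 0, 1, 0, 2, 1, 1])   # group ID per particle
    cc = np.arange(7, dtype='float64')           # per-particle values

    sort = subchain.argsort()
    cc = cc[sort]
    sort_subchain = subchain[sort]
    uniq_subchain = np.unique(sort_subchain)
    # Any nonzero difference between consecutive sorted IDs is a boundary.
    marks = np.nonzero(np.ediff1d(sort_subchain))[0] + 1
    marks = np.concatenate(([0], marks, [cc.size]))
    totals = np.empty(uniq_subchain.size, dtype='float64')
    for i, u in enumerate(uniq_subchain):
        totals[i] = np.sum(cc[marks[i]:marks[i+1]])
    # Equivalently, in one call: totals = np.add.reduceat(cc, marks[:-1])
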
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math, time
 
 from yt.funcs import *
@@ -186,7 +186,7 @@
         f = open(self.halo_file,'r')
         line = f.readline()
         if line == "":
-            self.haloes = na.array([])
+            self.haloes = np.array([])
             return
         while line[0] == '#':
             line = f.readline()
@@ -198,16 +198,16 @@
                 self.haloes.append(float(line[self.mass_column]))
             line = f.readline()
         f.close()
-        self.haloes = na.array(self.haloes)
+        self.haloes = np.array(self.haloes)
 
     def bin_haloes(self):
         """
         With the list of virial masses, find the halo mass function.
         """
-        bins = na.logspace(self.log_mass_min,
+        bins = np.logspace(self.log_mass_min,
             self.log_mass_max,self.num_sigma_bins)
         avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = na.histogram(self.haloes,bins)
+        dis, bins = np.histogram(self.haloes,bins)
         # add right to left
         for i,b in enumerate(dis):
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
@@ -246,13 +246,13 @@
 
         # output arrays
         # 1) radius: the spatial scale corresponding to each mass bin (Mpc/h)
-        self.Rarray = na.empty(self.num_sigma_bins,dtype='float64')
+        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
         # 2) log10 of mass (Msolar, NOT Msolar/h)
-        self.logmassarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 3) mass (Msolar/h)
-        self.massarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
         # 4) sigma(M, z=0, where mass is in Msun/h)
-        self.sigmaarray = na.empty(self.num_sigma_bins, dtype='float64')
+        self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
@@ -305,9 +305,9 @@
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3))
-        self.dn_M_z = na.empty(self.num_sigma_bins, dtype='float64')
+        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = na.zeros(self.num_sigma_bins, dtype='float64')
+        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -360,7 +360,7 @@
 
         Rcom = self.R;  # this is R in comoving Mpc/h
 
-        f = k*k*self.PofK(k)*na.power( abs(self.WofK(Rcom,k)), 2.0);
+        f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0);
 
         return f
 
@@ -369,7 +369,7 @@
         /* returns power spectrum as a function of wavenumber k */
         """
 
-        thisPofK = na.power(k, self.primordial_index) * na.power( self.TofK(k), 2.0);
+        thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0);
 
         return thisPofK;
 
@@ -389,7 +389,7 @@
 
         x = R*k;
 
-        thisWofK = 3.0 * ( na.sin(x) - x*na.cos(x) ) / (x*x*x);
+        thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x);
 
         return thisWofK;
 
@@ -660,22 +660,22 @@
         self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \
             SQR(self.num_degen_hdm*self.qq/self.f_hdm);
         temp1 = math.pow(self.growth_k0, 1.0-self.p_cb);
-        temp2 = na.power(self.growth_k0/(1+self.y_freestream),0.7);
-        self.growth_cb = na.power(1.0+temp2, self.p_cb/0.7)*temp1;
-        self.growth_cbnu = na.power(na.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
+        temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7);
+        self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1;
+        self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1;
     
         # Compute the master function
         self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \
             (1+SQR(SQR(kk*self.sound_horizon_fit*0.43))));
         self.qq_eff = self.qq*self.omhh/self.gamma_eff;
     
-        tf_sup_L = na.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
-        tf_sup_C = 14.4+325/(1+60.5*na.power(self.qq_eff,1.11));
+        tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff);
+        tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11));
         self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff));
     
         self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm);
         self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \
-            (na.power(self.qq_nu,-1.6)+na.power(self.qq_nu,0.8));
+            (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8));
         self.tf_master = self.tf_sup*self.max_fs_correction;
     
         # Now compute the CDM+HDM+baryon transfer functions
@@ -707,21 +707,21 @@
     changes by less than *error*. Hopefully someday we can do something
     better than this!
     """
-    xvals = na.logspace(0,na.log10(initial_guess), initial_guess+1)-.9
+    xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
-    # Trapezoid rule, but with different dxes between values, so na.trapz
+    # Trapezoid rule, but with different dxes between values, so np.trapz
     # will not work.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area0 = na.sum(areas)
+    area0 = np.sum(areas)
     # Next guess.
     next_guess = 10 * initial_guess
-    xvals = na.logspace(0,na.log10(next_guess), 2*initial_guess**2+1)-.99
+    xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99
     yvals = fcn(xvals)
     xdiffs = xvals[1:] - xvals[:-1]
     # Trapezoid rule.
     areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-    area1 = na.sum(areas)
+    area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
     area_final = area1
@@ -729,12 +729,12 @@
     one_pow = 3
     while diff > error:
         next_guess *= 10
-        xvals = na.logspace(0,na.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
+        xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow)
         yvals = fcn(xvals)
         xdiffs = xvals[1:] - xvals[:-1]
         # Trapezoid rule.
         areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0
-        area_next = na.sum(areas)
+        area_next = np.sum(areas)
         diff = area_next - area_last
         area_last = area_next
         one_pow+=1


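A side note on the integration helper above: the comment kept from the
original code says np.trapz will not work with varying dx, but np.trapz
does accept an explicit abscissa, so the manual sum is equivalent to a
single call. A quick sanity check with a toy integrand (not yt code):

    import numpy as np

    xvals = np.logspace(0, np.log10(1000), 101) - 0.9
    yvals = np.exp(-xvals)                        # any smooth integrand
    xdiffs = xvals[1:] - xvals[:-1]
    manual = np.sum((yvals[1:] + yvals[:-1]) * xdiffs / 2.0)
    builtin = np.trapz(yvals, xvals)              # non-uniform spacing is fine
    assert np.allclose(manual, builtin)
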
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -41,7 +41,7 @@
 # 8. Parentage is described by a fraction of particles that pass from one to
 #    the other; we have both descendant fractions and ancestry fractions.
 
-import numpy as na
+import numpy as np
 import h5py
 import time
 import pdb
@@ -119,7 +119,7 @@
             x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
             hp.append([x,y,z])
         if hp != []:
-            self.halo_positions = na.array(hp)
+            self.halo_positions = np.array(hp)
             self.halo_kdtree = KDTree(self.halo_positions)
         else:
             self.halo_positions = None
@@ -158,7 +158,7 @@
 class HaloParticleList(object):
     def __init__(self, halo_id, position, particle_ids):
         self.halo_id = halo_id
-        self.position = na.array(position)
+        self.position = np.array(position)
         self.particle_ids = particle_ids
         self.number_of_particles = particle_ids.size
 
@@ -168,7 +168,7 @@
     def find_relative_parentage(self, child):
         # Return two values: percent this halo gave to the other, and percent
         # of the other that comes from this halo
-        overlap = na.intersect1d(self.particle_ids, child.particle_ids).size
+        overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
         of_child_from_me = float(overlap)/child.particle_ids.size
         of_mine_from_me = float(overlap)/self.particle_ids.size
         return of_child_from_me, of_mine_from_me


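The find_relative_parentage hunk above reduces to a set intersection on
particle IDs. A toy example of the two fractions it returns (IDs invented):

    import numpy as np

    parent_ids = np.array([1, 2, 3, 4, 5])
    child_ids = np.array([4, 5, 6])
    overlap = np.intersect1d(parent_ids, child_ids).size   # 2 shared IDs
    of_child_from_me = float(overlap) / child_ids.size     # 2/3 of the child
    of_mine_from_me = float(overlap) / parent_ids.size     # 2/5 of the parent
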
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os, glob, time, gc, md5, sys
 import h5py
 import types
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -174,7 +171,7 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
-        self.with_halos = na.ones(len(restart_files), dtype='bool')
+        self.with_halos = np.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
         self.halo_finder_function = halo_finder_function # which halo finder to use
         self.halo_finder_threshold = halo_finder_threshold # overdensity threshold
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
-            child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            child_points = np.array(child_points)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = np.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -400,7 +387,7 @@
         # The +1 is an extra element in the array that collects garbage
         # values. This is allowing us to eliminate a try/except later.
         # This extra array element will be cut off eventually.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+        self.child_mass_arr = np.zeros(len(candidates)*NumNeighbors + 1,
             dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(np.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = np.array([], dtype='int64')
+                parent_masses = np.array([], dtype='float64')
+                parent_halos = np.array([], dtype='int32')
+            else:
+                parent_IDs = np.concatenate(parent_IDs).astype('int64')
+                parent_masses = np.concatenate(parent_masses).astype('float64')
+                parent_halos = np.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
-        parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+        parent_send = np.ones(parent_IDs.size, dtype='bool')
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(np.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = np.array([], dtype='int64')
+            child_masses = np.array([], dtype='float64')
+            child_halos = np.array([], dtype='int32')
+        else:
+            child_IDs = np.concatenate(child_IDs).astype('int64')
+            child_masses = np.concatenate(child_masses)
+            child_halos = np.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
-        child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
+        child_send = np.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,
@@ -618,8 +620,8 @@
     def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
             parent_masses, parent_send = None, child_send = None):
         # Pick out IDs that are in both arrays.
-        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
-        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique = True)
         # Pare down the arrays to just matched particle IDs.
         parent_halos_cut = parent_halos[parent_in_child]
         child_halos_cut = child_halos[child_in_parent]


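Note on the merger_tree changes above: the period keyword passed to
kdtree.query comes from yt's bundled fork in yt.utilities.spatial; the
stock SciPy cKDTree.query takes no such argument. A sketch of the
equivalent non-periodic query against plain SciPy (assuming SciPy is
available; positions are made up):

    import numpy as np
    from scipy.spatial import cKDTree

    child_points = np.random.random((100, 3))    # stand-in halo positions
    kdtree = cKDTree(child_points, leafsize=10)
    query = np.array([0.5, 0.5, 0.5])
    # Distances and indices of the 5 nearest neighbors, with no wrapping.
    dist, NNtags = kdtree.query(query, k=5)
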
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -24,7 +24,7 @@
 """
 
 from copy import deepcopy
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -105,11 +105,11 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = na.log10(temp_profile[field])
+            temp_profile[field] = np.log10(temp_profile[field])
 
     virial = dict((field, 0.0) for field in fields)
 
-    if (not (na.array(overDensity) >= virial_overdensity).any()) and \
+    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
             must_be_virialized:
         mylog.debug("This halo is not virialized!")
         return [False, {}]
@@ -123,7 +123,7 @@
     elif (overDensity[-1] >= virial_overdensity):
         index = -2
     else:
-        for q in (na.arange(len(overDensity),0,-1)-1):
+        for q in (np.arange(len(overDensity),0,-1)-1):
             if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
                 index = q - 1
                 break
@@ -144,7 +144,7 @@
 
     if use_log:
         for field in virial.keys():
-            virial[field] = na.power(10, virial[field])
+            virial[field] = np.power(10, virial[field])
 
     for vfilter in virial_filters:
         if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):


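The virial filter above brackets the virial overdensity in log10 space and
interpolates before undoing the log. A toy version of that bracketing and
interpolation (profile numbers invented):

    import numpy as np

    overdensity = np.array([5000., 1200., 300., 80.])
    radius = np.array([0.01, 0.05, 0.2, 1.0])
    virial_overdensity = 200.0

    log_od = np.log10(overdensity)
    log_r = np.log10(radius)
    log_vo = np.log10(virial_overdensity)
    # Find the bin where the profile drops through the threshold.
    q = np.where((log_od[:-1] >= log_vo) & (log_od[1:] < log_vo))[0][0]
    slope = (log_r[q+1] - log_r[q]) / (log_od[q+1] - log_od[q])
    r_virial = np.power(10, log_r[q] + slope * (log_vo - log_od[q]))
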
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import os
 import h5py
 import types
@@ -684,7 +684,7 @@
                 max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
                                                                                  lazy_reader=True)
                 max_grid = self.pf.h.grids[mg]
-                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                max_cell = np.unravel_index(maxi, max_grid.ActiveDimensions)
                 sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
                                                              max_grid['y-velocity'][max_cell],
                                                              max_grid['z-velocity'][max_cell]])
@@ -845,7 +845,7 @@
                               (self.projection_output_dir, halo['id'],
                                dataset_name, axis_labels[w])
                             if (frb[hp['field']] != 0).any():
-                                write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+                                write_image(np.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
                             else:
                                 mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
                                             (hp['field'], halo['id']))
@@ -1076,7 +1076,7 @@
                     profile[field].append(float(onLine[q]))
 
         for field in fields:
-            profile[field] = na.array(profile[field])
+            profile[field] = np.array(profile[field])
 
         profile_obj._data = profile
 
@@ -1171,7 +1171,7 @@
         for halo in self.filtered_halos:
             for halo_field in halo_fields:
                 if isinstance(halo[halo_field], types.ListType):
-                    field_data = na.array(halo[halo_field])
+                    field_data = np.array(halo[halo_field])
                     field_data.tofile(out_file, sep="\t", format=format)
                 else:
                     if halo_field == 'id':
@@ -1179,7 +1179,7 @@
                     else:
                         out_file.write("%s" % halo[halo_field])
                 out_file.write("\t")
-            field_data = na.array([halo[field] for field in fields])
+            field_data = np.array([halo[field] for field in fields])
             field_data.tofile(out_file, sep="\t", format=format)
             out_file.write("\n")
         out_file.close()
@@ -1207,7 +1207,7 @@
             value_list = []
             for halo in self.filtered_halos:
                 value_list.append(halo[halo_field])
-            value_list = na.array(value_list)
+            value_list = np.array(value_list)
             out_file.create_dataset(halo_field, data=value_list)
         out_file.close()
 
@@ -1215,7 +1215,7 @@
         fid = open(filename, "w")
         fields = [field for field in sorted(profile.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + fields + ["\n"]))
-        field_data = na.array([profile[field] for field in fields])
+        field_data = np.array([profile[field] for field in fields])
         for line in range(field_data.shape[1]):
             field_data[:, line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -1300,17 +1300,17 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+        plot.field_data['px'] = np.concatenate([plot['px'], add_x_px, add_y_px,
                                                 add2_x_px, add2_y_px])
-        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+        plot.field_data['py'] = np.concatenate([plot['py'], add_x_py, add_y_py,
                                                 add2_x_py, add2_y_py])
-        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+        plot.field_data['pdx'] = np.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
                                                  add2_x_pdx, add2_y_pdx])
-        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+        plot.field_data['pdy'] = np.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
                                                  add2_x_pdy, add2_y_pdy])
-        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+        plot.field_data[field] = np.concatenate([plot[field], add_x_field, add_y_field,
                                                  add2_x_field, add2_y_field])
-        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['weight_field'] = np.concatenate([plot['weight_field'],
                                                           add_x_weight_field, add_y_weight_field,
                                                           add2_x_weight_field, add2_y_weight_field])
 


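In the multi_halo_profiler hunk above, MaxLocation hands back a flat index
into the grid, so np.unravel_index recovers the (i, j, k) cell needed to
read the per-cell velocities. A minimal illustration:

    import numpy as np

    dims = (4, 4, 4)
    field = np.random.random(dims)
    maxi = field.argmax()                     # flat index of the maximum
    max_cell = np.unravel_index(maxi, dims)   # (i, j, k) tuple
    assert field[max_cell] == field.max()
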
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -24,7 +24,7 @@
 """
 
 import h5py, os.path
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.data_containers import YTFieldData
@@ -57,7 +57,7 @@
         self.Level = level
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
-        self.start_index = na.min([grid.get_global_startindex() for grid in
+        self.start_index = np.min([grid.get_global_startindex() for grid in
                              base_pf.h.select_grids(level)], axis=0).astype('int64')
         self.dds = base_pf.h.select_grids(level)[0].dds.copy()
         dims = (self.RightEdge-self.LeftEdge)/self.dds
@@ -106,11 +106,11 @@
         self.pf = pf
         self.always_copy = always_copy
         self.min_level = min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                              pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                                    pf.h.select_grids(min_level)], axis=0).astype('float64')
         if offset is None: offset = (max_right + min_left)/2.0
         self.left_edge_offset = offset
@@ -151,7 +151,7 @@
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
         level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
         level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
@@ -169,8 +169,8 @@
         int_origin, lint, origin, dds = self._convert_grid(grid)
         grid_node.attrs['integerOrigin'] = int_origin
         grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
         grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
@@ -203,11 +203,11 @@
         # First we set up our translation between original and extracted
         self.data_style = data_style
         self.min_level = pf.min_level
-        self.int_offset = na.min([grid.get_global_startindex() for grid in
+        self.int_offset = np.min([grid.get_global_startindex() for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = na.min([grid.LeftEdge for grid in
+        min_left = np.min([grid.LeftEdge for grid in
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = na.max([grid.RightEdge for grid in 
+        max_right = np.max([grid.RightEdge for grid in 
                            pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
         level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
         dims = ((max_right-min_left)/level_dx)
@@ -247,12 +247,12 @@
         # Here we need to set up the grid info, which for the Enzo hierarchy
         # is done like:
         # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= na.array(si, self.float_type)
+        # self.grid_dimensions -= np.array(si, self.float_type)
         # self.grid_dimensions += 1
         # self.grid_left_edge.flat[:] = LE
         # self.grid_right_edge.flat[:] = RE
         # self.grid_particle_count.flat[:] = np
-        # self.grids = na.array(self.grids, dtype='object')
+        # self.grids = np.array(self.grids, dtype='object')
         #
         # For now, we make the presupposition that all of our grids are
         # strictly nested and we are not doing any cuts.  However, we do
@@ -285,7 +285,7 @@
 
         self.grid_left_edge = self._convert_coords(self.grid_left_edge)
         self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
 
     def _fill_grid_arrays(self, grid, i):
         # This just fills in the grid arrays for a single grid --


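The hierarchy_subset hunks above lean on np.min and np.max with axis=0 to
reduce a list of per-grid edge vectors elementwise, which yields the
bounding box of a level. For instance (edges invented):

    import numpy as np

    left_edges = [np.array([0.1, 0.2, 0.0]), np.array([0.0, 0.3, 0.1])]
    right_edges = [np.array([0.5, 0.6, 0.4]), np.array([0.6, 0.5, 0.5])]
    min_left = np.min(left_edges, axis=0)     # array([ 0. ,  0.2,  0. ])
    max_right = np.max(right_edges, axis=0)   # array([ 0.6,  0.6,  0.5])
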
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -22,7 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -23,8 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
-nar = na.array
+import numpy as np
+nar = np.array
 
 counter = 0
 def recursive_all_clumps(clump,list,level,parentnumber):
@@ -89,7 +89,7 @@
     yt.visualization.plot_modification.ClumpContourCallback"""
     minDensity = [c['Density'].min() for c in clump_list]
     
-    args = na.argsort(minDensity)
+    args = np.argsort(minDensity)
     list = nar(clump_list)[args]
     reverse = range(list.size-1,-1,-1)
     return list[reverse]


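The clump ordering above is an ascending argsort followed by an explicit
reversed range; the same densest-first ordering reads as a single reversed
slice. A toy version:

    import numpy as np

    minDensity = np.array([3.0, 1.0, 2.0])    # per-clump minimum density
    ascending = np.argsort(minDensity)        # array([1, 2, 0])
    densest_first = ascending[::-1]           # array([0, 2, 1])
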
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -24,7 +24,7 @@
 """
 
 from itertools import chain
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.data_point_utilities as data_point_utilities
@@ -63,12 +63,12 @@
     tr = []
     for k in joins.keys():
         v = joins.pop(k)
-        tr.append((k, na.array(list(v), dtype="int64")))
+        tr.append((k, np.array(list(v), dtype="int64")))
     return tr
 
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = na.sum([g.ActiveDimensions.prod() for g in data_source._grids])
+    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
     pbar = get_pbar("First pass", len(data_source._grids))
     grids = sorted(data_source._grids, key=lambda g: -g.Level)
     total_contours = 0
@@ -76,27 +76,27 @@
     for gi,grid in enumerate(grids):
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
         old_field_parameters = grid.field_parameters
         grid.field_parameters = data_source.field_parameters
-        local_ind = na.where( (grid[field] > min_val)
+        local_ind = np.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
         grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
-        kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
+        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
+        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
         grid["tempContours"][local_ind] = kk[:]
         cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = na.where(grid["tempContours"] > -1)
-        cor_order = na.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
+        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
+        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
         fd_orig = grid["tempContours"].copy()
         xi = xi_u[cor_order]
         yi = yi_u[cor_order]
         zi = zi_u[cor_order]
         while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
             pass
-        total_contours += na.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = na.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
+        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
+        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
         tree += zip(new_contours, new_contours)
     tree = set(tree)
     pbar.finish()
@@ -110,10 +110,10 @@
         boundary_tree = amr_utils.construct_boundary_relationships(fd)
         tree.update(((a, b) for a, b in boundary_tree))
     pbar.finish()
-    sort_new = na.array(list(tree), dtype='int64')
+    sort_new = np.array(list(tree), dtype='int64')
     mylog.info("Coalescing %s joins", sort_new.shape[0])
     joins = coalesce_join_tree(sort_new)
-    #joins = [(i, na.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
+    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
     pbar = get_pbar("Joining ", len(joins))
     # This process could and should be done faster
     print "Joining..."
@@ -136,9 +136,9 @@
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
-    for contour_id in na.unique(data_source["tempContours"]):
+    for contour_id in np.unique(data_source["tempContours"]):
         if contour_id == -1: continue
-        contour_ind[i] = na.where(data_source["tempContours"] == contour_id)
+        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
         mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
         i += 1
     mylog.info("Identified %s contours between %0.5e and %0.5e",
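
The first pass above boils down to masking in-band cells and seeding each with a unique provisional id; a self-contained sketch under hypothetical data (an 8^3 random field, thresholds chosen arbitrarily):

    import numpy as np

    field = np.random.random((8, 8, 8))
    min_val, max_val = 0.2, 0.8
    cur_max_id = field.size            # one id available per cell

    local_ind = np.where((field > min_val) & (field < max_val))
    temp_contours = np.ones(field.shape, dtype="int64") * -1
    # Seed each selected cell with a unique id, counting down.
    kk = np.arange(cur_max_id, cur_max_id - local_ind[0].size, -1)
    temp_contours[local_ind] = kk
    n_seeds = np.unique(temp_contours[temp_contours > -1]).size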


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/radial_column_density/radial_column_density.py
--- a/yt/analysis_modules/radial_column_density/radial_column_density.py
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -105,14 +105,14 @@
         """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.center = na.asarray(center)
+        self.center = np.asarray(center)
         self.max_radius = max_radius
         self.steps = steps
         self.base = base
         self.Nside = Nside
         self.ang_divs = ang_divs
-        self.real_ang_divs = int(na.abs(ang_divs))
-        self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+        self.real_ang_divs = int(np.abs(ang_divs))
+        self.phi, self.theta = np.mgrid[0.0:2*np.pi:ang_divs, 0:np.pi:ang_divs]
         self.phi1d = self.phi[:,0]
         self.theta1d = self.theta[0,:]
         self.dphi = self.phi1d[1] - self.phi1d[0]
@@ -135,20 +135,20 @@
         # but this will work for now.
         right = self.pf.domain_right_edge - self.center
         left = self.center - self.pf.domain_left_edge
-        min_r = na.min(right)
-        min_l = na.min(left)
-        self.max_radius = na.min([self.max_radius, min_r, min_l])
+        min_r = np.min(right)
+        min_l = np.min(left)
+        self.max_radius = np.min([self.max_radius, min_r, min_l])
     
     def _make_bins(self):
         # We'll make the bins start from the smallest cell size to the
         # specified radius. Column density inside the same cell as our 
         # center is kind of ill-defined, anyway.
         if self.base == 'lin':
-            self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+            self.bins = np.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
                 self.steps)
         elif self.base == 'log':
-            self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
-                na.log10(self.max_radius), self.steps)
+            self.bins = np.logspace(np.log10(self.pf.h.get_smallest_dx()),
+                np.log10(self.max_radius), self.steps)
     
     def _build_surfaces(self, field):
         # This will be index by bin index.
@@ -172,17 +172,17 @@
             Values of zero are found outside the maximum radius and
             in the cell of the user-specified center point.
             This setting is useful if the field is going to be logged
-            (e.g. na.log10) where zeros are inconvenient.
+            (e.g. np.log10) where zeros are inconvenient.
             Default = None
         """
         x = data['x']
         sh = x.shape
-        ad = na.prod(sh)
+        ad = np.prod(sh)
         if type(data) == type(FieldDetector()):
-            return na.ones(sh)
+            return np.ones(sh)
         y = data['y']
         z = data['z']
-        pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+        pos = np.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
         del x, y, z
         vals = self._interpolate_value(pos)
         del pos
@@ -199,25 +199,25 @@
         # according to the points angle.
         # 1. Find the angle from the center point to the position.
         vec = pos - self.center
-        phi = na.arctan2(vec[:, 1], vec[:, 0])
+        phi = np.arctan2(vec[:, 1], vec[:, 0])
         # Convert the convention from [-pi, pi) to [0, 2pi).
         sel = (phi < 0)
-        phi[sel] += 2 * na.pi
+        phi[sel] += 2 * np.pi
         # Find the radius.
-        r = na.sqrt(na.sum(vec * vec, axis = 1))
+        r = np.sqrt(np.sum(vec * vec, axis = 1))
         # Keep track of the points outside of self.max_radius, which we'll
         # handle separately before we return.
         outside = (r > self.max_radius)
-        theta = na.arccos(vec[:, 2] / r)
+        theta = np.arccos(vec[:, 2] / r)
         # 2. Find the bin for this position.
-        digi = na.digitize(r, self.bins)
+        digi = np.digitize(r, self.bins)
         # Find the values on the inner and outer surfaces.
-        in_val = na.zeros_like(r)
-        out_val = na.zeros_like(r)
+        in_val = np.zeros_like(r)
+        out_val = np.zeros_like(r)
         # These two will be used for interpolation.
-        in_r = na.zeros_like(r)
-        out_r = na.zeros_like(r)
-        for bin in na.unique(digi):
+        in_r = np.zeros_like(r)
+        out_r = np.zeros_like(r)
+        for bin in np.unique(digi):
             sel = (digi == bin)
             # Special case if we're outside the largest sphere.
             if bin == len(self.bins):
@@ -229,7 +229,7 @@
                 continue
             # Special case if we're inside the smallest sphere.
             elif bin == 0:
-                in_val[sel] = na.zeros_like(phi[sel])
+                in_val[sel] = np.zeros_like(phi[sel])
                 in_r[sel] = 0.
                 out_val[sel] = self._interpolate_surface_value(1,
                     phi[sel], theta[sel])
@@ -244,11 +244,11 @@
                     phi[sel], theta[sel])
                 out_r[sel] = self.bins[bin]
         # Interpolate using a linear fit in column density / r space.
-        val = na.empty_like(r)
+        val = np.empty_like(r)
         # Special case for inside smallest sphere.
         sel = (digi == 0)
         val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
-        na.invert(sel, sel) # In-place operation!
+        np.invert(sel, sel) # In-place operation!
         val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
             (r[sel] - in_r[sel]) + in_val[sel]
         # Fix the things to zero that should be zero.
@@ -259,8 +259,8 @@
         # Given a surface bin and an angle, interpolate the value on
         # that surface to the angle.
         # 1. Find the four values closest to the angle.
-        phi_bin = na.digitize(phi, self.phi1d)
-        theta_bin = na.digitize(theta, self.theta1d)
+        phi_bin = np.digitize(phi, self.phi1d)
+        theta_bin = np.digitize(theta, self.theta1d)
         val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
         val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
         val10 = self.surfaces[bin][phi_bin, theta_bin - 1]
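
The angular bookkeeping in _interpolate_value follows a standard recipe: arctan2 for the azimuth (shifted into [0, 2pi)), arccos for the polar angle, and digitize for the radial bin. A sketch with hypothetical offsets from the center:

    import numpy as np

    vec = np.random.uniform(-1, 1, size=(100, 3))   # positions minus center
    bins = np.linspace(0.01, 1.0, 32)               # radial bin edges

    phi = np.arctan2(vec[:, 1], vec[:, 0])
    phi[phi < 0] += 2 * np.pi            # [-pi, pi) -> [0, 2*pi)
    r = np.sqrt(np.sum(vec * vec, axis=1))
    theta = np.arccos(vec[:, 2] / r)     # polar angle from +z
    digi = np.digitize(r, bins)          # 0 => inside the smallest shell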


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -47,18 +47,18 @@
 
         self.bounds = bounds
         self.ev_bounds = ev_bounds
-        self.ev_vals = na.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
+        self.ev_vals = np.logspace(ev_bounds[0], ev_bounds[1], table.shape[-1])
         
     def _get_interpolator(self, ev_min, ev_max):
         """
         Integrates from ev_min to ev_max and returns an interpolator.
         """
-        e_is, e_ie = na.digitize([ev_min, ev_max], self.ev_vals)
-        bin_table = na.trapz(self.table[...,e_is-1:e_ie],
+        e_is, e_ie = np.digitize([ev_min, ev_max], self.ev_vals)
+        bin_table = np.trapz(self.table[...,e_is-1:e_ie],
                              2.41799e17*
             (self.ev_vals[e_is:e_ie+1]-self.ev_vals[e_is-1:e_is]),
                              axis=-1)
-        bin_table = na.log10(bin_table.clip(1e-80,bin_table.max()))
+        bin_table = np.log10(bin_table.clip(1e-80,bin_table.max()))
         return BilinearFieldInterpolator(
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
@@ -73,8 +73,8 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : na.log10(data["NumberDensity"]),
-                  'Temperature'   : na.log10(data["Temperature"])}
+            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+                  'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
@@ -91,8 +91,8 @@
     e_n_bins, e_min, e_max = e_spec
     T_n_bins, T_min, T_max = T_spec
     # The second one is the fast-varying one
-    rho_is, e_is = na.mgrid[0:rho_n_bins,0:e_n_bins]
-    table = na.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
+    rho_is, e_is = np.mgrid[0:rho_n_bins,0:e_n_bins]
+    table = np.zeros((rho_n_bins, T_n_bins, e_n_bins), dtype='float64')
     mylog.info("Parsing Cloudy files")
     for i,ri,ei in zip(range(rho_n_bins*e_n_bins), rho_is.ravel(), e_is.ravel()):
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
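
_get_interpolator above is a trapezoidal integration over a slice of the energy axis; a simplified sketch (hypothetical table, and omitting the eV-to-Hz conversion factor the real code applies):

    import numpy as np

    ev_vals = np.logspace(-1, 2, 64)        # energy sample points, eV
    table = np.random.random((16, 16, 64))  # last axis = energy

    ev_min, ev_max = 0.5, 7.0
    e_is, e_ie = np.digitize([ev_min, ev_max], ev_vals)
    # Integrate the selected energy slice along the last axis.
    bin_table = np.trapz(table[..., e_is - 1:e_ie],
                         ev_vals[e_is - 1:e_ie], axis=-1)
    # Clip before logging so empty bins don't produce -inf.
    bin_table = np.log10(bin_table.clip(1e-80, bin_table.max()))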


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 import math, itertools
 
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = na.array(star_mass)
-        self.star_creation_time = na.array(star_creation_time)
+        self.star_mass = np.array(star_mass)
+        self.star_creation_time = np.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of information.
@@ -114,13 +114,13 @@
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = na.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
-        inds = na.digitize(ct_stars, self.time_bins) - 1
+        inds = np.digitize(ct_stars, self.time_bins) - 1
         # Sum up the stars created in each time bin.
-        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
-        for index in na.unique(inds):
+        self.mass_bins = np.zeros(self.bin_count + 1, dtype='float64')
+        for index in np.unique(inds):
             self.mass_bins[index] += sum(mass_stars[inds == index])
         # Calculate the cumulative mass sum over time by forward adding.
         self.cum_mass_bins = self.mass_bins.copy()
@@ -162,13 +162,13 @@
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])
             self.Msol_cumulative.append(self.cum_mass_bins[i])
-        self.time = na.array(self.time)
-        self.lookback_time = na.array(self.lookback_time)
-        self.redshift = na.array(self.redshift)
-        self.Msol_yr = na.array(self.Msol_yr)
-        self.Msol_yr_vol = na.array(self.Msol_yr_vol)
-        self.Msol = na.array(self.Msol)
-        self.Msol_cumulative = na.array(self.Msol_cumulative)
+        self.time = np.array(self.time)
+        self.lookback_time = np.array(self.lookback_time)
+        self.redshift = np.array(self.redshift)
+        self.Msol_yr = np.array(self.Msol_yr)
+        self.Msol_yr_vol = np.array(self.Msol_yr_vol)
+        self.Msol = np.array(self.Msol)
+        self.Msol_cumulative = np.array(self.Msol_cumulative)
     
     def write_out(self, name="StarFormationRate.out"):
         r"""Write out the star analysis to a text file *name*. The columns are in
@@ -234,10 +234,10 @@
 METAL3 = 0.2828
 METAL4 = 0.6325
 METAL5 = 1.5811
-METALS = na.array([METAL1, METAL2, METAL3, METAL4, METAL5])
+METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5])
 
 # Translate METALS array digitize to the table dicts
-MtoD = na.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
+MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02",  "Z05"])
 
 """
 This spectrum code is based on code from Ken Nagamine, converted from C to Python.
@@ -340,7 +340,7 @@
         >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6)
         """
         # Initialize values
-        self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
+        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
         self._data_source = data_source
         if iterable(star_mass):
             self.star_mass = star_mass
@@ -372,7 +372,7 @@
                 """)
                 return None
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             if star_metallicity_fraction is not None:
                 self.star_metal = star_metallicity_fraction
@@ -382,7 +382,7 @@
             self.star_creation_time = ct[ct > 0]
             self.star_mass = self._data_source["ParticleMassMsun"][ct > 0]
             if star_metallicity_constant is not None:
-                self.star_metal = na.ones(self.star_mass.size, dtype='float64') * \
+                self.star_metal = np.ones(self.star_mass.size, dtype='float64') * \
                     star_metallicity_constant
             else:
                 self.star_metal = self._data_source["metallicity_fraction"][ct > 0]
@@ -390,7 +390,7 @@
         self.star_metal /= Zsun
         # Age of star in years.
         dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
-        dt = na.maximum(dt, 0.0)
+        dt = np.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
         if len(sub) == 0: return
@@ -398,18 +398,18 @@
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]
         # Figure out which METALS bin the star goes into.
-        Mindex = na.digitize(self.star_metal, METALS)
+        Mindex = np.digitize(self.star_metal, METALS)
         # Replace the indices with strings.
         Mname = MtoD[Mindex]
         # Figure out which age bin this star goes into.
-        Aindex = na.digitize(dt, self.age)
+        Aindex = np.digitize(dt, self.age)
         # Ratios used for the interpolation.
         ratio1 = (dt - self.age[Aindex-1]) / (self.age[Aindex] - self.age[Aindex-1])
         ratio2 = (self.age[Aindex] - dt) / (self.age[Aindex] - self.age[Aindex-1])
         # Sort the stars by metallicity and then by age, which should reduce
         # memory access time by a little bit in the loop.
-        indexes = na.arange(self.star_metal.size)
-        sort = na.asarray([indexes[i] for i in na.lexsort([indexes, Aindex, Mname])])
+        indexes = np.arange(self.star_metal.size)
+        sort = np.asarray([indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
         Mname = Mname[sort]
         Aindex = Aindex[sort]
         ratio1 = ratio1[sort]
@@ -426,15 +426,15 @@
             # Get the one just before the one above.
             flux_1 = self.flux[star[0]][star[1]-1,:]
             # interpolate in log(flux), linear in time.
-            int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
+            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
             # Add this flux to the total, weighted by mass.
-            self.final_spec += na.power(10., int_flux) * star[4]
+            self.final_spec += np.power(10., int_flux) * star[4]
             pbar.update(i)
         pbar.finish()    
         
         # Normalize.
-        self.total_mass = na.sum(self.star_mass)
-        self.avg_mass = na.mean(self.star_mass)
+        self.total_mass = np.sum(self.star_mass)
+        self.avg_mass = np.mean(self.star_mass)
         tot_metal = sum(self.star_metal * self.star_mass)
         self.avg_metal = math.log10(tot_metal / self.total_mass / Zsun)
 
@@ -455,25 +455,25 @@
 #             # From the flux array for this metal, and our selection, build
 #             # a new flux array just for the ages of these stars, in the 
 #             # same order as the selection of stars.
-#             this_flux = na.matrix(self.flux[metal_name][A])
+#             this_flux = np.matrix(self.flux[metal_name][A])
 #             # Make one for the last time step for each star in the same fashion
 #             # as above.
-#             this_flux_1 = na.matrix(self.flux[metal_name][A-1])
+#             this_flux_1 = np.matrix(self.flux[metal_name][A-1])
 #             # This is kind of messy, but we're going to multiply this_fluxes
 #             # by the appropriate ratios and add it together to do the 
 #             # interpolation in log(flux) and linear in time.
 #             print r1.size
-#             r1 = na.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
-#             r2 = na.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
+#             r1 = np.matrix(r1.tolist()*self.wavelength.size).reshape(self.wavelength.size,r1.size).T
+#             r2 = np.matrix(r2.tolist()*self.wavelength.size).reshape(self.wavelength.size,r2.size).T
 #             print this_flux_1.shape, r1.shape
-#             int_flux = na.multiply(na.log10(this_flux_1),r1) \
-#                 + na.multiply(na.log10(this_flux),r2)
+#             int_flux = np.multiply(np.log10(this_flux_1),r1) \
+#                 + np.multiply(np.log10(this_flux),r2)
 #             # Weight the fluxes by mass.
-#             sm = na.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
-#             int_flux = na.multiply(na.power(10., int_flux), sm)
+#             sm = np.matrix(sm.tolist()*self.wavelength.size).reshape(self.wavelength.size,sm.size).T
+#             int_flux = np.multiply(np.power(10., int_flux), sm)
 #             # Sum along the columns, converting back to an array, adding
 #             # to the full spectrum.
-#             self.final_spec += na.array(int_flux.sum(axis=0))[0,:]
+#             self.final_spec += np.array(int_flux.sum(axis=0))[0,:]
 
     
     def write_out(self, name="sum_flux.out"):
@@ -518,8 +518,8 @@
         >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.)
         """
         # find the f_nu closest to flux_norm
-        fn_wavelength = na.argmin(abs(self.wavelength - flux_norm))
-        f_nu = self.final_spec * na.power(self.wavelength, 2.) / LIGHT
+        fn_wavelength = np.argmin(abs(self.wavelength - flux_norm))
+        f_nu = self.final_spec * np.power(self.wavelength, 2.) / LIGHT
         # Normalize f_nu
         self.f_nu = f_nu / f_nu[fn_wavelength]
         # Write out.
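
The star-formation-rate binning above reduces to digitize-and-sum; a compact sketch with a hypothetical star catalog (the real code also converts to years, redshift, and volume-normalized rates):

    import numpy as np

    star_mass = np.random.uniform(1e2, 1e4, size=1000)   # Msun
    ct_stars = np.random.uniform(0.0, 1.0, size=1000)    # creation time, code units
    current_time, bin_count = 1.0, 50

    # Pad the lower edge slightly so the oldest star lands inside bin 0.
    time_bins = np.linspace(ct_stars.min() * 0.99, current_time,
                            num=bin_count + 1)
    inds = np.digitize(ct_stars, time_bins) - 1
    mass_bins = np.zeros(bin_count + 1, dtype="float64")
    for index in np.unique(inds):
        mass_bins[index] += star_mass[inds == index].sum()
    cum_mass_bins = np.cumsum(mass_bins)   # cumulative mass formed over time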


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -32,7 +32,7 @@
     pass
 
 import time
-import numpy as na
+import numpy as np
 import numpy.linalg as linalg
 import collections
 
@@ -78,14 +78,14 @@
 
     """
 
-    fc = na.array(fc)
-    fwidth = na.array(fwidth)
+    fc = np.array(fc)
+    fwidth = np.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
     ile,ire,super_level,ncells_wide= \
             round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
 
-    assert na.all((ile-ire)==(ile-ire)[0])
+    assert np.all((ile-ire)==(ile-ire)[0])
     mylog.info("rounding specified region:")
     mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
     mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
@@ -153,7 +153,7 @@
         print "[%03i %03i %03i] "%tuple(dre),
         print " with %i halos"%num_halos
         dle,dre = domain
-        dle, dre = na.array(dle),na.array(dre)
+        dle, dre = np.array(dle),np.array(dre)
         fn = fni 
         fn += "%03i_%03i_%03i-"%tuple(dle)
         fn += "%03i_%03i_%03i"%tuple(dre)
@@ -178,7 +178,7 @@
     dn = pf.domain_dimensions
     for halo in halo_list:
         fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
-        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = np.floor(fle*dn), np.ceil(fre*dn)
         dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
         if (dle,dre) in domains.keys():
             domains[(dle,dre)] += halo,
@@ -211,7 +211,7 @@
     del field_data
 
     #first we cast every cell as an oct
-    #ngrids = na.max([g.id for g in pf._grids])
+    #ngrids = np.max([g.id for g in pf._grids])
     grids = {}
     levels_all = {} 
     levels_finest = {}
@@ -220,13 +220,13 @@
         levels_all[l]=0
     pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for gi,g in enumerate(pf.h.grids):
-        ff = na.array([g[f] for f in fields])
+        ff = np.array([g[f] for f in fields])
         og = amr_utils.OctreeGrid(
                 g.child_index_mask.astype('int32'),
                 ff.astype("float64"),
                 g.LeftEdge.astype("float64"),
                 g.ActiveDimensions.astype("int32"),
-                na.ones(1,dtype="float64")*g.dds[0],
+                np.ones(1,dtype="float64")*g.dds[0],
                 g.Level,
                 g.id)
         grids[g.id] = og
@@ -246,11 +246,11 @@
     #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
-    o_length = na.sum(levels_all.values())
-    r_length = na.sum(levels_all.values())
-    output   = na.zeros((o_length,len(fields)), dtype='float64')
-    refined  = na.zeros(r_length, dtype='int32')
-    levels   = na.zeros(r_length, dtype='int32')
+    o_length = np.sum(levels_all.values())
+    r_length = np.sum(levels_all.values())
+    output   = np.zeros((o_length,len(fields)), dtype='float64')
+    refined  = np.zeros(r_length, dtype='int32')
+    levels   = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -332,7 +332,7 @@
         #calculate the floating point LE of the children
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
-        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
         for i, (vertex,hilbert_child) in enumerate(hilbert):
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
@@ -340,7 +340,7 @@
                 subgrid = grid #we don't actually descend if we're a superlevel
                 child_ile = cell_index + vertex*2**(-level)
             else:
-                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
                     subgrid,hilbert_child,output,refined,levels,grids,level+1,
@@ -381,17 +381,17 @@
     col_list.append(pyfits.Column("mass_metals", format='D',
                     array=fd['MetalMass'], unit="Msun"))
     # col_list.append(pyfits.Column("mass_stars", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="Msun"))
     # col_list.append(pyfits.Column("age_m", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("age_l", format='D',
-    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    #                 array=np.zeros(size,dtype='D'),unit="yr*Msun"))
     # col_list.append(pyfits.Column("L_bol", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # col_list.append(pyfits.Column("L_lambda", format='D',
-    #                 array=na.zeros(size,dtype='D')))
+    #                 array=np.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
@@ -402,7 +402,7 @@
                     array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
-                    array=na.zeros(size, dtype='D')))
+                    array=np.zeros(size, dtype='D')))
     cols = pyfits.ColDefs(col_list)
     mg_table = pyfits.new_table(cols)
     mg_table.header.update("M_g_tot", tm)
@@ -411,7 +411,7 @@
     mg_table.name = "GRIDDATA"
 
     # Add a dummy Primary; might be a better way to do this!
-    col_list = [pyfits.Column("dummy", format="F", array=na.zeros(1, dtype='float32'))]
+    col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))]
     cols = pyfits.ColDefs(col_list)
     md_table = pyfits.new_table(cols)
     md_table.header.update("snaptime", pf.current_time*pf['years'])
@@ -437,12 +437,12 @@
 
 def round_ncells_wide(dds,fle,fre,nwide=None):
     fc = (fle+fre)/2.0
-    assert na.all(fle < fc)
-    assert na.all(fre > fc)
-    ic = na.rint(fc*dds) #nearest vertex to the center
+    assert np.all(fle < fc)
+    assert np.all(fre > fc)
+    ic = np.rint(fc*dds) #nearest vertex to the center
     ile,ire = ic.astype('int'),ic.astype('int')
     cfle,cfre = fc.copy(),fc.copy()
-    idx = na.array([0,0,0]) #just a random non-equal array
+    idx = np.array([0,0,0]) #just a random non-equal array
     width = 0.0
     if nwide is None:
         #expand until borders are included and
@@ -450,41 +450,41 @@
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 0.1/dds
             #quit if idxq is true:
-            idxq = idx[0]>0 and na.all(idx==idx[0])
-            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            idxq = idx[0]>0 and np.all(idx==idx[0])
+            out  = np.all(fle>cfle) and np.all(fre<cfre) 
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
         #expand until the region spans nwide cells
-        while not na.all(idx==nwide):
-            assert na.any(idx<=nwide)
+        while not np.all(idx==nwide):
+            assert np.any(idx<=nwide)
             cfle,cfre = fc-width, fc+width
-            ile = na.rint(cfle*dds).astype('int')
-            ire = na.rint(cfre*dds).astype('int')
+            ile = np.rint(cfle*dds).astype('int')
+            ire = np.rint(cfre*dds).astype('int')
             idx = ire-ile
             width += 1e-2*1.0/dds
-    assert na.all(idx==nwide)
+    assert np.all(idx==nwide)
     assert idx[0]>0
-    maxlevel = -na.rint(na.log2(nwide)).astype('int')
-    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    maxlevel = -np.rint(np.log2(nwide)).astype('int')
+    assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2
     return ile,ire,maxlevel,nwide
 
 def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(fle*dds).astype('int')
-    ire = na.ceil(fre*dds).astype('int') 
+    ile = np.floor(fle*dds).astype('int')
+    ire = np.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
-    width = na.max(ire-ile)
+    width = np.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = -na.rint(na.log2(width)).astype('int')
+    maxlevel = -np.rint(np.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
@@ -497,14 +497,14 @@
         dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
-        pos = na.array([dd["particle_position_%s" % ax]
+        pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
     if vel is None:
-        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+        vel = np.array([dd["particle_velocity_%s" % ax][idx]
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
@@ -525,8 +525,8 @@
     formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
-    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
@@ -540,7 +540,7 @@
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
     #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=na.zeros(current_mass.size)))
+    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -570,7 +570,7 @@
                 / data["dynamical_time"])
         xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
                 / data["dynamical_time"])
-        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial
 
@@ -698,14 +698,14 @@
     camera_positions in Sunrise.
     """
 
-    sim_center = na.array(sim_center)
+    sim_center = np.array(sim_center)
     if sim_sphere_radius is None:
         sim_sphere_radius = 10.0/pf['kpc']
     if sim_axis_short is None:
         if dd is None:
             dd = pf.h.all_data()
-        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
         mas = dd["particle_mass"]
         pos = pos[idx]
         mas = mas[idx]
@@ -722,14 +722,14 @@
     if scene_distance is  None:
         scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
     if scene_fov is None:
-        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
         #idx= radii < sim_halo_radius*0.10
         #radii = radii[idx]
         #mass  = mas[idx] #copying mass into mas
-        si = na.argsort(radii)
+        si = np.argsort(radii)
         radii = radii[si]
         mass  = mas[si]
-        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
         re = radii[idx[0]]
         scene_fov = 5*re
         scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
@@ -745,11 +745,11 @@
     
     #rotate the camera
     if scene_rot :
-        irotation = na.eye(3)
-    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+        irotation = np.eye(3)
+    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
     sunrise_up  = matmul(irotation,scene_up)
     sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
+    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
     #change to physical kpc
     sunrise_pos *= pf['kpc']
@@ -763,11 +763,11 @@
     use this to multiply two matrices, it will think that you're
     trying to multiply by a set of vectors and all hell will break
     loose."""    
-    assert type(v) is not na.matrix
-    v = na.asarray(v)
-    m, vs = [na.asmatrix(a) for a in (m, v)]
+    assert type(v) is not np.matrix
+    v = np.asarray(v)
+    m, vs = [np.asmatrix(a) for a in (m, v)]
 
-    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    result = np.asarray(np.transpose(m * np.transpose(vs)))    
     if len(v.shape) == 1:
         return result[0]
     return result
@@ -775,14 +775,14 @@
 
 def mag(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
-        return na.sqrt( (vs**2).sum() )
-    return na.sqrt( (vs**2).sum(axis=1) )
+        return np.sqrt( (vs**2).sum() )
+    return np.sqrt( (vs**2).sum(axis=1) )
 
 def mag2(vs):
     """Compute the norms of a set of vectors or a single vector."""
-    vs = na.asarray(vs)
+    vs = np.asarray(vs)
     if len(vs.shape) == 1:
         return (vs**2).sum()
     return (vs**2).sum(axis=1)
@@ -791,25 +791,25 @@
 def position_moment(rs, ms=None, axes=None):
     """Find second position moment tensor.
     If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = na.asarray(rs)
+    rs = np.asarray(rs)
     Npart, N = rs.shape
-    if ms is None: ms = na.ones(Npart)
-    else: ms = na.asarray(ms)    
+    if ms is None: ms = np.ones(Npart)
+    else: ms = np.asarray(ms)    
     if axes is not None:
-        axes = na.asarray(axes,dtype=float64)
+        axes = np.asarray(axes,dtype=float64)
         axes = axes/axes.max()
         norms2 = mag2(rs/axes)
     else:
-        norms2 = na.ones(Npart)
+        norms2 = np.ones(Npart)
     M = ms.sum()
-    result = na.zeros((N,N))
+    result = np.zeros((N,N))
     # matrix is symmetric, so only compute half of it then fill in the
     # other half
     for i in range(N):
         for j in range(i+1):
             result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
         
-    result = result + result.transpose() - na.identity(N)*result
+    result = result + result.transpose() - np.identity(N)*result
     return result
     
 
@@ -826,7 +826,7 @@
     make the long axis line up with the x axis and the short axis line
     up with the x (z) axis for the 2 (3) dimensional case."""
     # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    mag = lambda x: np.sqrt(np.sum(x**2.0))
     v = v/mag(v)
     w = w/mag(w)    
     if check:
@@ -843,7 +843,7 @@
     w_prime = euler_passive(w,phi,theta,0.)
     if w_prime[0] < 0: w_prime = -w_prime
     # Now last Euler angle should just be this:
-    psi = na.arctan2(w_prime[1],w_prime[0])
+    psi = np.arctan2(w_prime[1],w_prime[0])
     return phi, theta, psi
 
 def find_euler_phi_theta(v):
@@ -851,19 +851,19 @@
     direction"""
     # Make sure the vector is normalized
     v = v/mag(v)
-    theta = na.arccos(v[2])
-    phi = na.arctan2(v[0],-v[1])
+    theta = np.arccos(v[2])
+    phi = np.arctan2(v[0],-v[1])
     return phi,theta
 
 def euler_matrix(phi, the, psi):
     """Make an Euler transformation matrix"""
-    cpsi=na.cos(psi)
-    spsi=na.sin(psi)
-    cphi=na.cos(phi)
-    sphi=na.sin(phi)
-    cthe=na.cos(the)
-    sthe=na.sin(the)
-    m = na.mat(na.zeros((3,3)))
+    cpsi=np.cos(psi)
+    spsi=np.sin(psi)
+    cphi=np.cos(phi)
+    sphi=np.sin(phi)
+    cthe=np.cos(the)
+    sthe=np.sin(the)
+    m = np.mat(np.zeros((3,3)))
     m[0,0] = cpsi*cphi - cthe*sphi*spsi
     m[0,1] = cpsi*sphi + cthe*cphi*spsi
     m[0,2] = spsi*sthe
@@ -912,9 +912,9 @@
 cameraset_ring = collections.OrderedDict()
 
 segments = 20
-for angle in na.linspace(0,360,segments):
-    pos = [na.cos(angle),0.,na.sin(angle)]
-    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+for angle in np.linspace(0,360,segments):
+    pos = [np.cos(angle),0.,np.sin(angle)]
+    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
     cameraset_ring['%02i'%angle]=(pos,vc)
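
Note that the ring above feeds degrees from np.linspace(0, 360, ...) to np.cos and np.sin, which expect radians; a corrected sketch of the same ring (hypothetical, not part of this changeset):

    import collections
    import numpy as np

    cameraset_ring = collections.OrderedDict()
    segments = 20
    for angle in np.linspace(0, 2 * np.pi, segments, endpoint=False):
        # Camera on the unit circle in the x-z plane...
        pos = [np.cos(angle), 0., np.sin(angle)]
        # ...with the view direction tangent to the ring.
        vc = [np.cos(angle + np.pi / 2), 0., np.sin(angle + np.pi / 2)]
        cameraset_ring['%03i' % round(np.degrees(angle))] = (pos, vc)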
             
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -144,10 +144,10 @@
             length_range[0] = math.sqrt(3) * self.pf.h.get_smallest_dx()
         # Make the list of ruler lengths.
         if length_type == "lin":
-            self.lengths = na.linspace(length_range[0], length_range[1],
+            self.lengths = np.linspace(length_range[0], length_range[1],
                 length_number)
         elif length_type == "log":
-            self.lengths = na.logspace(math.log10(length_range[0]),
+            self.lengths = np.logspace(math.log10(length_range[0]),
                 math.log10(length_range[1]), length_number)
         else:
             # Something went wrong.
@@ -177,7 +177,7 @@
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
-        self.mt = na.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
+        self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt)
     
     def add_function(self, function, out_labels, sqrt, corr_norm=None):
         r"""Add a function to the list that will be evaluated at the
@@ -265,7 +265,7 @@
                 mylog.info("Doing length %1.5e" % length)
             # Things stop when this value below equals total_values.
             self.generated_points = 0
-            self.gen_array = na.zeros(self.size, dtype='int64')
+            self.gen_array = np.zeros(self.size, dtype='int64')
             self.comm_cycle_count = 0
             self.final_comm_cycle_count = 0
             self.sent_done = False
@@ -280,7 +280,7 @@
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
-                        #(na.abs(na.log10(na.abs(self.recv_points))) > 20).any():
+                        #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any():
                     raise ValueError("self.recv_points is no good!")
                 self.points = self.recv_points.copy()
                 self.fields_vals = self.recv_fields_vals.copy()
@@ -312,7 +312,7 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        fKD.pos = na.asfortranarray(na.empty((3,xp.size), dtype='float64'))
+        fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64'))
         # Normalize the grid points only within the kdtree.
         fKD.pos[0, :] = xp[:] / self.period[0]
         fKD.pos[1, :] = yp[:] / self.period[1]
@@ -332,8 +332,8 @@
         xp = self.ds["x"]
         yp = self.ds["y"]
         zp = self.ds["z"]
-        self.sizes = [na.unique(xp).size, na.unique(yp).size, na.unique(zp).size]        
-        self.sort = na.lexsort([zp, yp, xp])
+        self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size]        
+        self.sort = np.lexsort([zp, yp, xp])
         del xp, yp, zp
         self.ds.clear_data()
     
@@ -341,7 +341,7 @@
         """
         Builds an array to store the field values array.
         """
-        self.fields_vals = na.empty((self.comm_size, len(self.fields)*2), \
+        self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         # At the same time build a dict to label the columns.
         self.fields_columns = {}
@@ -353,7 +353,7 @@
         Initializes the array that contains the random points as all negatives
         to start with.
         """
-        self.points = na.ones((self.comm_size, 6), dtype='float64') * -1.0
+        self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0
     
     def _setup_done_hooks_on_root(self):
         """
@@ -364,7 +364,7 @@
         self.recv_done = {}
         for task in xrange(self.size):
             if task == self.mine: continue
-            self.recv_done[task] = na.zeros(1, dtype='int64')
+            self.recv_done[task] = np.zeros(1, dtype='int64')
             self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
@@ -376,13 +376,13 @@
         if self.sent_done: return
         if self.mine !=0:
             # I send when I *think* things should finish.
-            self.send_done = na.ones(1, dtype='int64') * \
+            self.send_done = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
             self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
-            self.recv_done[0] = na.ones(1, dtype='int64') * \
+            self.recv_done[0] = np.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
         self.sent_done = True
     
@@ -416,10 +416,10 @@
         Creates the recv buffers and calls a non-blocking MPI receive pointing
         to the left-hand neighbor.
         """
-        self.recv_points = na.ones((self.comm_size, 6), dtype='float64') * -1.
-        self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
+        self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1.
+        self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
-        self.recv_gen_array = na.zeros(self.size, dtype='int64')
+        self.recv_gen_array = np.zeros(self.size, dtype='int64')
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
         self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
@@ -470,7 +470,7 @@
         Picks out size random pairs separated by length *length*.
         """
         # First make random points inside this subvolume.
-        r1 = na.empty((size,3), dtype='float64')
+        r1 = np.empty((size,3), dtype='float64')
         for dim in range(3):
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
@@ -480,15 +480,15 @@
         # but phi and theta are switched to the Physics convention.
         if self.constant_phi is None:
             phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        else: phi = self.constant_phi * np.ones(size, dtype='float64')
         if self.constant_theta is None:
             v = self.mt.uniform(low=0., high=1, size=size)
-            theta = na.arccos(2 * v - 1)
-        else: theta = self.constant_theta * na.ones(size, dtype='float64')
-        r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
-        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
-        r2[:,2] = r1[:,2] + length * na.cos(theta)
+            theta = np.arccos(2 * v - 1)
+        else: theta = self.constant_theta * np.ones(size, dtype='float64')
+        r2 = np.empty((size,3), dtype='float64')
+        r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta)
+        r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta)
+        r2[:,2] = r1[:,2] + length * np.cos(theta)
         # Reflect so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)
@@ -508,7 +508,7 @@
             points[:, 1] = points[:, 1] / self.period[1]
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
-            fKD.nn_tags = na.asfortranarray(na.empty((1, points.shape[0]), dtype='int64'))
+            fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
             find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
@@ -521,7 +521,7 @@
         """
         # First find the grid data index field.
         indices = self._find_nearest_cell(points)
-        results = na.empty((len(indices), len(self.fields)), dtype='float64')
+        results = np.empty((len(indices), len(self.fields)), dtype='float64')
         # Put the field values into the columns of results.
         for field in self.fields:
             col = self.fields_columns[field]
@@ -547,7 +547,7 @@
                 self.generated_points += size
                 # If size != select.sum(), we need to pad the end of new_r1/r2
                 # which is what is effectively happening below.
-                newpoints = na.ones((ssum, 6), dtype='float64') * -1.
+                newpoints = np.ones((ssum, 6), dtype='float64') * -1.
                 newpoints[:size,:3] = new_r1
                 newpoints[:size,3:] = new_r2
                 # Now we insert them into self.points.
@@ -564,9 +564,9 @@
             # or I don't need to make any new points and I'm just processing the
             # array. Start by finding the indices of the points I own.
             self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast!
-            select = na.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1),
                 (self.points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             mypoints = self.points[select]
             if mypoints.size > 0:
                 # Get the fields values.
@@ -583,19 +583,19 @@
             # To run the functions, what is key is that the
             # second point in the pair is ours.
             second_points = self.points[:,3:]
-            select = na.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
+            select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1),
                 (second_points >= self.ds.right_edge).any(axis=1))
-            select = na.invert(select)
+            select = np.invert(select)
             if select.any():
                 points_to_eval = self.points[select]
                 fields_to_eval = self.fields_vals[select]
                 
                 # Find the normal vector between our points.
-                vec = na.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
-                norm = na.sqrt(na.sum(na.multiply(vec,vec), axis=1))
+                vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:])
+                norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1))
                 # I wish there was a better way to do this, but I can't find it.
                 for i, n in enumerate(norm):
-                    vec[i] = na.divide(vec[i], n)
+                    vec[i] = np.divide(vec[i], n)
                 
                 # Now evaluate the functions.
                 for fcn_set in self._fsets:
@@ -604,7 +604,7 @@
                     fcn_set._bin_results(length, fcn_results)
                 
                 # Now clear the buffers at the processed points.
-                self.points[select] = na.array([-1.]*6, dtype='float64')
+                self.points[select] = np.array([-1.]*6, dtype='float64')
                 
             else:
                 # We didn't clear any points, so we should move on with our
@@ -712,8 +712,8 @@
         self.corr_norm = corr_norm # A number used to normalize a correlation function.
         # These below are used to track how many times the function returns
         # unbinned results.
-        self.too_low = na.zeros(len(self.out_labels), dtype='int32')
-        self.too_high = na.zeros(len(self.out_labels), dtype='int32')
+        self.too_low = np.zeros(len(self.out_labels), dtype='int32')
+        self.too_high = np.zeros(len(self.out_labels), dtype='int32')
         
     def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None):
         r"""Set the parameters used to build the Probability Distribution Function
@@ -772,14 +772,14 @@
             bin_type, bin_number = [bin_type], [bin_number]
             bin_range = [bin_range]
         self.bin_type = bin_type
-        self.bin_number = na.array(bin_number) - 1
+        self.bin_number = np.array(bin_number) - 1
         self.dims = range(len(bin_type))
         # Create the dict that stores the arrays to store the bin hits, and
         # the arrays themselves.
         self.length_bin_hits = {}
         for length in self.tpf.lengths:
             # It's easier to index flattened, but will be unflattened later.
-            self.length_bin_hits[length] = na.zeros(self.bin_number,
+            self.length_bin_hits[length] = np.zeros(self.bin_number,
                 dtype='int64').flatten()
         # Create the bin edges for each dimension.
         # self.bins is indexed by dimension
@@ -792,10 +792,10 @@
                 raise ValueError("bin_range[1] must be larger than bin_range[0]")
             # Make the edges for this dimension.
             if bin_type[dim] == "lin":
-                self.bin_edges[dim] = na.linspace(bin_range[dim][0], bin_range[dim][1],
+                self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1],
                     bin_number[dim])
             elif bin_type[dim] == "log":
-                self.bin_edges[dim] = na.logspace(math.log10(bin_range[dim][0]),
+                self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]),
                     math.log10(bin_range[dim][1]), bin_number[dim])
             else:
                 raise SyntaxError("bin_edges is either \"lin\" or \"log\".")
@@ -822,32 +822,32 @@
         is flattened, so we need to figure out the offset for this hit by
         factoring the sizes of the other dimensions.
         """
-        hit_bin = na.zeros(results.shape[0], dtype='int64')
+        hit_bin = np.zeros(results.shape[0], dtype='int64')
         multi = 1
-        good = na.ones(results.shape[0], dtype='bool')
+        good = np.ones(results.shape[0], dtype='bool')
         for dim in range(len(self.out_labels)):
             for d1 in range(dim):
                 multi *= self.bin_edges[d1].size
             if dim == 0 and len(self.out_labels)==1:
                 try:
-                    digi = na.digitize(results, self.bin_edges[dim])
+                    digi = np.digitize(results, self.bin_edges[dim])
                 except ValueError:
                     # The user probably did something like 
                     # return a * b rather than
                     # return a[0] * b[0], which will only happen
                     # for single field functions.
-                    digi = na.digitize(results[0], self.bin_edges[dim])
+                    digi = np.digitize(results[0], self.bin_edges[dim])
             else:
-                digi = na.digitize(results[:,dim], self.bin_edges[dim])
+                digi = np.digitize(results[:,dim], self.bin_edges[dim])
             too_low = (digi == 0)
             too_high = (digi == self.bin_edges[dim].size)
             self.too_low[dim] += (too_low).sum()
             self.too_high[dim] += (too_high).sum()
-            newgood = na.bitwise_and(na.invert(too_low), na.invert(too_high))
-            good = na.bitwise_and(good, newgood)
-            hit_bin += na.multiply((digi - 1), multi)
-        digi_bins = na.arange(self.length_bin_hits[length].size+1)
-        hist, digi_bins = na.histogram(hit_bin[good], digi_bins)
+            newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
+            good = np.bitwise_and(good, newgood)
+            hit_bin += np.multiply((digi - 1), multi)
+        digi_bins = np.arange(self.length_bin_hits[length].size+1)
+        hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
         self.length_bin_hits[length] += hist
 
     def _dim_sum(self, a, dim):
@@ -855,11 +855,11 @@
         Given a multidimensional array a, this finds the sum over all the
         elements leaving the dimension dim untouched.
         """
-        dims = na.arange(len(a.shape))
-        dims = na.flipud(dims)
+        dims = np.arange(len(a.shape))
+        dims = np.flipud(dims)
         gt_dims = dims[dims > dim]
         lt_dims = dims[dims < dim]
-        iter_dims = na.concatenate((gt_dims, lt_dims))
+        iter_dims = np.concatenate((gt_dims, lt_dims))
         for this_dim in iter_dims:
             a = a.sum(axis=this_dim)
         return a
@@ -882,6 +882,6 @@
         """
         xi = {}
         for length in self.tpf.lengths:
-            xi[length] = -1 + na.sum(self.length_bin_hits[length] * \
+            xi[length] = -1 + np.sum(self.length_bin_hits[length] * \
                 self.bin_edges[0][:-1]) / self.corr_norm
         return xi
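
The pair generation used by the two-point machinery draws isotropic directions by taking phi uniform on [0, 2pi) and cos(theta) uniform on [-1, 1]; a self-contained sketch with hypothetical sizes and a unit periodic box:

    import numpy as np

    rng = np.random.mtrand.RandomState(seed=1234)
    size, length = 8, 0.05
    period = np.array([1.0, 1.0, 1.0])

    r1 = rng.uniform(size=(size, 3))            # first point of each pair
    phi = rng.uniform(low=0.0, high=2 * np.pi, size=size)
    theta = np.arccos(2 * rng.uniform(size=size) - 1)
    r2 = np.empty((size, 3))
    r2[:, 0] = r1[:, 0] + length * np.cos(phi) * np.sin(theta)
    r2[:, 1] = r1[:, 1] + length * np.sin(phi) * np.sin(theta)
    r2[:, 2] = r1[:, 2] + length * np.cos(theta)
    r2 %= period        # wrap back into the periodic volume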


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import numpy.core.records as rec
 
 # Now define convenience functions
@@ -41,5 +41,5 @@
     """
     blanks = []
     for atype in desc['formats']:
-        blanks.append(na.zeros(elements, dtype=atype))
+        blanks.append(np.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)
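
blankRecordArray builds a zero-filled record array from a dtype description. A small usage sketch (the desc dict follows the numpy.core.records conventions; the field names and length are illustrative):

    import numpy as np
    import numpy.core.records as rec

    desc = {'names': ['x', 'mass'], 'formats': ['float64', 'float64']}
    blanks = [np.zeros(4, dtype=t) for t in desc['formats']]
    blank = rec.fromarrays(blanks, **desc)
    # blank['x'] and blank['mass'] are zero-filled arrays of length 4
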


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -24,7 +24,7 @@
 """
 
 import glob
-import numpy as na
+import numpy as np
 import os, os.path, inspect, types
 from functools import wraps
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -29,7 +29,7 @@
 
 data_object_registry = {}
 
-import numpy as na
+import numpy as np
 import math
 import weakref
 import exceptions
@@ -74,9 +74,9 @@
         return item
     except AttributeError:
         if item:
-            return na.ones(shape, dtype='bool')
+            return np.ones(shape, dtype='bool')
         else:
-            return na.zeros(shape, dtype='bool')
+            return np.zeros(shape, dtype='bool')
 
 def restore_grid_state(func):
     """
@@ -181,13 +181,13 @@
         if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tempx = na.abs(self['x'] - center[0])
-                tempx = na.minimum(tempx, self.DW[0] - tempx)
-                tempy = na.abs(self['y'] - center[1])
-                tempy = na.minimum(tempy, self.DW[1] - tempy)
-                tempz = na.abs(self['z'] - center[2])
-                tempz = na.minimum(tempz, self.DW[2] - tempz)
-                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
+                tempx = np.abs(self['x'] - center[0])
+                tempx = np.minimum(tempx, self.DW[0] - tempx)
+                tempy = np.abs(self['y'] - center[1])
+                tempy = np.minimum(tempy, self.DW[1] - tempy)
+                tempz = np.abs(self['z'] - center[2])
+                tempz = np.minimum(tempz, self.DW[2] - tempz)
+                tr = np.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.field_data[field]
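
The RadiusCode branch computes a distance that respects periodic boundaries: each coordinate offset is folded through the domain width before taking the Euclidean norm. A standalone sketch of the same minimum-image computation (DW, center, and pos are illustrative):

    import numpy as np

    DW = np.array([1.0, 1.0, 1.0])          # domain width, illustrative
    center = np.array([0.9, 0.5, 0.5])
    pos = np.array([[0.05, 0.5, 0.5]])      # one point near the far edge

    temp = np.abs(pos - center)
    temp = np.minimum(temp, DW - temp)      # fold across the periodic boundary
    r = np.sqrt((temp ** 2).sum(axis=-1))
    # r is ~0.15, not 0.85: the short way around the box
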
@@ -235,14 +235,14 @@
             self.set_field_parameter(key, val)
 
     def __set_default_field_parameters(self):
-        self.set_field_parameter("center",na.zeros(3,dtype='float64'))
-        self.set_field_parameter("bulk_velocity",na.zeros(3,dtype='float64'))
+        self.set_field_parameter("center",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
             pass
-        elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
-            center = na.array(center)
+        elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
+            center = np.array(center)
         elif center in ("c", "center"):
             center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
@@ -250,7 +250,7 @@
         elif center.startswith("max_"):
             center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = na.array(center, dtype='float64')
+            center = np.array(center, dtype='float64')
         self.center = center
         self.set_field_parameter('center', center)
 
@@ -376,7 +376,7 @@
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.field_data[field] for field in field_order])
+        field_data = np.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -421,11 +421,11 @@
         return grids
 
     def select_grid_indices(self, level):
-        return na.where(self.grid_levels == level)
+        return np.where(self.grid_levels == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
-            self.__grid_left_edge = na.array([g.LeftEdge for g in self._grids])
+            self.__grid_left_edge = np.array([g.LeftEdge for g in self._grids])
         return self.__grid_left_edge
 
     def __del_grid_left_edge(self):
@@ -441,7 +441,7 @@
 
     def __get_grid_right_edge(self):
         if self.__grid_right_edge == None:
-            self.__grid_right_edge = na.array([g.RightEdge for g in self._grids])
+            self.__grid_right_edge = np.array([g.RightEdge for g in self._grids])
         return self.__grid_right_edge
 
     def __del_grid_right_edge(self):
@@ -457,7 +457,7 @@
 
     def __get_grid_levels(self):
         if self.__grid_levels == None:
-            self.__grid_levels = na.array([g.Level for g in self._grids])
+            self.__grid_levels = np.array([g.Level for g in self._grids])
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +474,7 @@
 
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
-            self.__grid_dimensions = na.array([g.ActiveDimensions for g in self._grids])
+            self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
         return self.__grid_dimensions
 
     def __del_grid_dimensions(self):
@@ -516,13 +516,13 @@
             if field not in self.hierarchy.field_list and not in_grids:
                 if field not in ("dts", "t") and self._generate_field(field):
                     continue # True means we already assigned it
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
             if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
-                self._sortkey = na.argsort(self[self.sort_by])
+                self._sortkey = np.argsort(self[self.sort_by])
             # We *always* sort the field here if we have not successfully
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
@@ -581,7 +581,7 @@
 
     def _get_list_of_grids(self):
         # This bugs me, but we will give the tie to the LeftEdge
-        y = na.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
+        y = np.where( (self.px >=  self.pf.hierarchy.grid_left_edge[:,self.px_ax])
                     & (self.px < self.pf.hierarchy.grid_right_edge[:,self.px_ax])
                     & (self.py >=  self.pf.hierarchy.grid_left_edge[:,self.py_ax])
                     & (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
@@ -604,10 +604,10 @@
         else:
             sl = self._cut_masks[grid.id]
         if not iterable(grid[field]):
-            gf = grid[field] * na.ones(grid.child_mask[sl].shape)
+            gf = grid[field] * np.ones(grid.child_mask[sl].shape)
         else:
             gf = grid[field][sl]
-        return gf[na.where(grid.child_mask[sl])]
+        return gf[np.where(grid.child_mask[sl])]
 
 class AMRRayBase(AMR1DData):
     _type_name = "ray"
@@ -646,10 +646,10 @@
         >>> print ray["Density"], ray["t"], ray["dts"]
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
-        self.start_point = na.array(start_point, dtype='float64')
-        self.end_point = na.array(end_point, dtype='float64')
+        self.start_point = np.array(start_point, dtype='float64')
+        self.end_point = np.array(end_point, dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= na.sqrt(na.dot(self.vec, self.vec))
+        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = {}, {}
@@ -659,7 +659,7 @@
         # Get the value of the line at each LeftEdge and RightEdge
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        p = na.zeros(self.pf.h.num_grids, dtype='bool')
+        p = np.zeros(self.pf.h.num_grids, dtype='bool')
         # Check left faces first
         for i in range(3):
             i1 = (i+1) % 3
@@ -670,10 +670,10 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( na.all( LE <= self.start_point, axis=1 ) 
-                & na.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( na.all( LE <= self.end_point,   axis=1 ) 
-                & na.all( RE >= self.end_point,   axis=1 ) )
+        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+                & np.all( RE >= self.start_point, axis=1 ) )
+        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+                & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
     def _get_line_at_coord(self, v, index):
@@ -684,24 +684,24 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
         gf = grid[field]
         if not iterable(gf):
-            gf = gf * na.ones(grid.child_mask.shape)
+            gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
                        grid.dds, self.center, self.vec)
-        self._dts[grid.id] = na.abs(dts)
-        self._ts[grid.id] = na.abs(ts)
+        self._dts[grid.id] = np.abs(dts)
+        self._ts[grid.id] = np.abs(ts)
         return mask
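
For each grid, VoxelTraversal fills the mask and the (t, dt) arrays along the parametric line start + t*vec. A minimal parametric-ray sketch of the geometry involved (no grids here, just sampled points; all values are illustrative):

    import numpy as np

    start = np.array([0.1, 0.1, 0.1])
    end = np.array([0.9, 0.7, 0.3])
    vec = end - start

    t = np.linspace(0.0, 1.0, 16)        # parameter along the ray
    points = start + t[:, None] * vec    # (16, 3) sample positions
    dts = np.diff(t)                     # parameter length of each segment
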
 
 class AMRStreamlineBase(AMR1DData):
@@ -745,11 +745,11 @@
         """
         AMR1DData.__init__(self, pf, fields, **kwargs)
         self.positions = positions
-        self.dts = na.empty_like(positions[:,0])
-        self.dts[:-1] = na.sqrt(na.sum((self.positions[1:]-
+        self.dts = np.empty_like(positions[:,0])
+        self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-1]
-        self.ts = na.add.accumulate(self.dts)
+        self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
         self._dts, self._ts = {}, {}
@@ -760,14 +760,14 @@
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
         # Check left faces first
-        min_streampoint = na.min(self.positions, axis=0)
-        max_streampoint = na.max(self.positions, axis=0)
-        p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
+        min_streampoint = np.min(self.positions, axis=0)
+        max_streampoint = np.max(self.positions, axis=0)
+        p = np.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
         self._grids = self.hierarchy.grids[p]
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = na.logical_and(self._get_cut_mask(grid),
+        mask = np.logical_and(self._get_cut_mask(grid),
                               grid.child_mask)
         if field == 'dts': return self._dts[grid.id][mask]
         if field == 't': return self._ts[grid.id][mask]
@@ -775,13 +775,13 @@
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = na.zeros(grid.ActiveDimensions, dtype='int')
-        dts = na.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = na.zeros(grid.ActiveDimensions, dtype='float64')
+        mask = np.zeros(grid.ActiveDimensions, dtype='int')
+        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
+        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
-        points_in_grid = na.all(self.positions > grid.LeftEdge, axis=1) & \
-                         na.all(self.positions <= grid.RightEdge, axis=1) 
-        pids = na.where(points_in_grid)[0]
+        points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
+                         np.all(self.positions <= grid.RightEdge, axis=1) 
+        pids = np.where(points_in_grid)[0]
         for i, pos in zip(pids, self.positions[points_in_grid]):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
@@ -842,8 +842,8 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = na.array([])
-            else: data = na.concatenate(data)
+            if len(data) == 0: data = np.array([])
+            else: data = np.concatenate(data)
             temp_data[field] = data
             # Now the next field can use this field
             self[field] = temp_data[field] 
@@ -891,7 +891,7 @@
 
         >>> proj = pf.h.proj(0, "Density")
         >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png')
         """
         if center is None:
             center = self.get_field_parameter("center")
@@ -944,11 +944,11 @@
         """
         import yt.utilities.delaunay as de
         if log_spacing:
-            zz = na.log10(self[field])
+            zz = np.log10(self[field])
         else:
             zz = self[field]
-        xi, yi = na.array( \
-                 na.mgrid[LE[0]:RE[0]:side*1j, \
+        xi, yi = np.array( \
+                 np.mgrid[LE[0]:RE[0]:side*1j, \
                           LE[1]:RE[1]:side*1j], 'float64')
         zi = de.Triangulation(self['px'],self['py']).nn_interpolator(zz)\
                  [LE[0]:RE[0]:side*1j, \
@@ -1082,7 +1082,7 @@
             points = None
             t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
-            points = na.concatenate(points)
+            points = np.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
@@ -1124,27 +1124,27 @@
         nx = grid.child_mask.shape[xaxis]
         ny = grid.child_mask.shape[yaxis]
         mask = self.__cut_mask_child_mask(grid)[sl]
-        cm = na.where(mask.ravel()== 1)
-        cmI = na.indices((nx,ny))
+        cm = np.where(mask.ravel()== 1)
+        cmI = np.indices((nx,ny))
         ind = cmI[0, :].ravel()   # xind
         npoints = cm[0].shape
         # create array of "npoints" ones that will be reused later
-        points = na.ones(npoints, 'float64')
+        points = np.ones(npoints, 'float64')
         # calculate xpoints array
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
         del cmI   # no longer needed 
-        t = na.vstack( (t, points * ind[cm] * dy + \
+        t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
         del ind, cm   # no longer needed
         # calculate zpoints array
-        t = na.vstack((t, points * self.coord))
+        t = np.vstack((t, points * self.coord))
         # calculate dx array
-        t = na.vstack((t, points * dx * 0.5))
+        t = np.vstack((t, points * dx * 0.5))
         # calculate dy array
-        t = na.vstack((t, points * dy * 0.5))
+        t = np.vstack((t, points * dy * 0.5))
         # return [xpoints, ypoints, zpoints, dx, dy] as (5, npoints) array
         return t.swapaxes(0, 1)
 
@@ -1169,7 +1169,7 @@
             dv = self.hierarchy.io._read_data_slice(grid, field, self.axis, sl_ind) * conv_factor
         else:
             dv = grid[field]
-            if dv.size == 1: dv = na.ones(grid.ActiveDimensions)*dv
+            if dv.size == 1: dv = np.ones(grid.ActiveDimensions)*dv
             dv = dv[sl]
         mask = self.__cut_mask_child_mask(grid)[sl]
         dataVals = dv.ravel()[mask.ravel() == 1]
@@ -1251,11 +1251,11 @@
         # ax + by + cz + d = 0
         self.orienter = Orientation(normal, north_vector = north_vector)
         self._norm_vec = self.orienter.normal_vector
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
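
The cutting plane is stored as ax + by + cz + d = 0 with unit normal n and d = -n·center, so n·p + d is the signed distance of any point p from the plane. A minimal sketch of that bookkeeping (normal and center are illustrative):

    import numpy as np

    normal = np.array([1.0, 1.0, 0.0])
    center = np.array([0.5, 0.5, 0.5])

    n = normal / np.sqrt(np.dot(normal, normal))   # unit normal
    d = -1.0 * np.dot(n, center)

    p = np.array([0.5, 0.5, 0.9])
    signed_dist = np.dot(n, p) + d                 # 0.0: p lies in the plane
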
@@ -1276,7 +1276,7 @@
         # @todo: Convert to using corners
         LE = self.pf.h.grid_left_edge
         RE = self.pf.h.grid_right_edge
-        vertices = na.array([[LE[:,0],LE[:,1],LE[:,2]],
+        vertices = np.array([[LE[:,0],LE[:,1],LE[:,2]],
                              [RE[:,0],RE[:,1],RE[:,2]],
                              [LE[:,0],LE[:,1],RE[:,2]],
                              [RE[:,0],RE[:,1],LE[:,2]],
@@ -1285,27 +1285,27 @@
                              [LE[:,0],RE[:,1],LE[:,2]],
                              [RE[:,0],LE[:,1],RE[:,2]]])
         # This gives us shape: 8, 3, n_grid
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
         self.D = D
         self._grids = self.hierarchy.grids[
-            na.where(na.logical_not(na.all(D<0,axis=0) | na.all(D>0,axis=0) )) ]
+            np.where(np.logical_not(np.all(D<0,axis=0) | np.all(D>0,axis=0) )) ]
 
     @cache_mask
     def _get_cut_mask(self, grid):
         # This is slow.  Suggestions for improvement would be great...
         ss = grid.ActiveDimensions
-        D = na.ones(ss) * self._d
+        D = np.ones(ss) * self._d
         x = grid.LeftEdge[0] + grid.dds[0] * \
-                (na.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[0], dtype='float64')+0.5)
         y = grid.LeftEdge[1] + grid.dds[1] * \
-                (na.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[1], dtype='float64')+0.5)
         z = grid.LeftEdge[2] + grid.dds[2] * \
-                (na.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
+                (np.arange(grid.ActiveDimensions[2], dtype='float64')+0.5)
         D += (x * self._norm_vec[0]).reshape(ss[0],1,1)
         D += (y * self._norm_vec[1]).reshape(1,ss[1],1)
         D += (z * self._norm_vec[2]).reshape(1,1,ss[2])
-        diag_dist = na.sqrt(na.sum(grid.dds**2.0))
-        cm = (na.abs(D) <= 0.5*diag_dist) # Boolean
+        diag_dist = np.sqrt(np.sum(grid.dds**2.0))
+        cm = (np.abs(D) <= 0.5*diag_dist) # Boolean
         return cm
 
     def _generate_coords(self):
@@ -1313,12 +1313,12 @@
         for grid in self._get_grids():
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
-        else: points = na.concatenate(points)
+        else: points = np.concatenate(points)
         t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
-        self['px'] = na.dot(pos, self._x_vec)
-        self['py'] = na.dot(pos, self._y_vec)
-        self['pz'] = na.dot(pos, self._norm_vec)
+        self['px'] = np.dot(pos, self._x_vec)
+        self['py'] = np.dot(pos, self._y_vec)
+        self['pz'] = np.dot(pos, self._norm_vec)
         self['pdx'] = t[:,3] * 0.5
         self['pdy'] = t[:,3] * 0.5
         self['pdz'] = t[:,3] * 0.5
@@ -1326,14 +1326,14 @@
     def _generate_grid_coords(self, grid):
         pointI = self._get_point_indices(grid)
         coords = [grid[ax][pointI].ravel() for ax in 'xyz']
-        coords.append(na.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
-        return na.array(coords).swapaxes(0,1)
+        coords.append(np.ones(coords[0].shape, 'float64') * just_one(grid['dx']))
+        return np.array(coords).swapaxes(0,1)
 
     def _get_data_from_grid(self, grid, field):
         if not self.pf.field_info[field].particle_type:
             pointI = self._get_point_indices(grid)
             if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions)
+                t = grid[field] * np.ones(grid.ActiveDimensions)
                 return t[pointI].ravel()
             return grid[field][pointI].ravel()
         else:
@@ -1344,10 +1344,10 @@
 
     @cache_point_indices
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _gen_node_name(self):
         cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
@@ -1391,7 +1391,7 @@
         >>> L = sp.quantities["AngularMomentumVector"]()
         >>> cutting = pf.h.cutting(L, c)
         >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
             w, u = width
@@ -1435,34 +1435,34 @@
         self.width = width
         self.dims = dims
         self.dds = self.width / self.dims
-        self.bounds = na.array([0.0,1.0,0.0,1.0])
+        self.bounds = np.array([0.0,1.0,0.0,1.0])
         
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
 
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         if node_name is False:
             self._refresh_data()
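
The pixel centers of the oblique slice are laid out on an in-plane grid and mapped into 3-space with outer products against the two in-plane unit vectors. A standalone sketch (dims, dds, center, and the unit vectors are illustrative; x_vec and y_vec should be orthonormal and perpendicular to the normal):

    import numpy as np

    dims, dds = 4, 0.25
    center = np.array([0.5, 0.5, 0.5])
    x_vec = np.array([1.0, 0.0, 0.0])
    y_vec = np.array([0.0, 1.0, 0.0])

    co = dds * (np.mgrid[-dims//2:dims//2, -dims//2:dims//2] + 0.5)
    coord = center + np.outer(co[0], x_vec) + np.outer(co[1], y_vec)
    # coord has shape (dims*dims, 3): one 3-space position per pixel
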
@@ -1479,11 +1479,11 @@
         # within width/2 of the center.
         vertices = self.hierarchy.gridCorners
         # Shape = (8,3,n_grid)
-        D = na.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = na.where(na.logical_not(na.all(D<0,axis=0) |
-                                              na.all(D>0,axis=0) ))[0]
+        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
+        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
+                                              np.all(D>0,axis=0) ))[0]
         # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = na.array([ \
+        sliceCorners = np.array([ \
             self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
             self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
             self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
@@ -1491,12 +1491,12 @@
         sliceLeftEdge = sliceCorners.min(axis=0)
         sliceRightEdge = sliceCorners.max(axis=0)
         # Check for bounding box and grid overlap
-        leftOverlap = na.less(self.hierarchy.gridLeftEdge[valid_grids],
+        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
                               sliceRightEdge).all(axis=1)
-        rightOverlap = na.greater(self.hierarchy.gridRightEdge[valid_grids],
+        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
                                   sliceLeftEdge).all(axis=1)
         self._grids = self.hierarchy.grids[valid_grids[
-            na.where(leftOverlap & rightOverlap)]]
+            np.where(leftOverlap & rightOverlap)]]
         self._grids = self._grids[::-1]
 
     def _generate_coords(self):
@@ -1512,7 +1512,7 @@
             pointI = self._get_point_indices(grid)
             if len(pointI) == 0: return
             vc = self._calc_vertex_centered_data(grid, field)
-            bds = na.array(zip(grid.LeftEdge,
+            bds = np.array(zip(grid.LeftEdge,
                                grid.RightEdge)).ravel()
             interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
             self[field][pointI] = interp( \
@@ -1538,27 +1538,27 @@
         self.width = width
         self.dds = self.width / self.dims
         self.set_field_parameter('center', center)
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
+        vecs = np.identity(3)
+        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
         ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
+        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
+        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
+        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
         self.set_field_parameter('cp_x_vec',self._x_vec)
         self.set_field_parameter('cp_y_vec',self._y_vec)
         self.set_field_parameter('cp_z_vec',self._norm_vec)
         # Calculate coordinates of each pixel
         _co = self.dds * \
-              (na.mgrid[-self.dims/2 : self.dims/2,
+              (np.mgrid[-self.dims/2 : self.dims/2,
                         -self.dims/2 : self.dims/2] + 0.5)
 
-        self._coord = self.center + na.outer(_co[0,:,:], self._x_vec) + \
-                      na.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = na.ones(self.dims*self.dims, dtype='int8')
+        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
+                      np.outer(_co[1,:,:], self._y_vec)
+        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
 
         self._refresh_data()
         return
@@ -1584,7 +1584,7 @@
                     continue # A "True" return means we did it
             if not self._vc_data.has_key(field):
                 self._vc_data[field] = {}
-            self[field] = na.zeros(_size, dtype='float64')
+            self[field] = np.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
             self[field] = self.comm.mpi_allreduce(\
@@ -1686,9 +1686,9 @@
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
         self.proj_style = style
         if style == "mip":
-            self.func = na.max
+            self.func = np.max
         elif style == "integrate":
-            self.func = na.sum # for the future
+            self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
         self.weight_field = weight_field
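
The projection style picks the reduction applied along the line of sight: np.max for a maximum-intensity projection ("mip") and np.sum for an integral ("integrate"). A toy sketch of the difference (data and axis are illustrative):

    import numpy as np

    data = np.random.random((8, 8, 8))     # a toy 3D field
    axis = 2

    mip = np.max(data, axis=axis)          # brightest cell along each ray
    integrated = np.sum(data, axis=axis)   # column total along each ray
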
@@ -1743,7 +1743,7 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+        return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
                         style = self.proj_style)
 
     def _get_dls(self, grid, fields):
@@ -1755,8 +1755,8 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        dls = na.array(dls)
-        convs = na.array(convs)
+        dls = np.array(dls)
+        convs = np.array(convs)
         if self.proj_style == "mip":
             dls[:] = 1.0
             convs[:] = 1.0
@@ -1822,14 +1822,14 @@
                 ds = gs[0].dds[0]
             else:
                 ds = 0.0
-            dxs.append(na.ones(nvals.shape[0], dtype='float64') * ds)
-        coord_data = na.concatenate(coord_data, axis=0).transpose()
-        field_data = na.concatenate(field_data, axis=0).transpose()
+            dxs.append(np.ones(nvals.shape[0], dtype='float64') * ds)
+        coord_data = np.concatenate(coord_data, axis=0).transpose()
+        field_data = np.concatenate(field_data, axis=0).transpose()
         if self._weight is None:
             dls, convs = self._get_dls(self._grids[0], fields)
             field_data *= convs[:,None]
-        weight_data = na.concatenate(weight_data, axis=0).transpose()
-        dxs = na.concatenate(dxs, axis=0).transpose()
+        weight_data = np.concatenate(weight_data, axis=0).transpose()
+        dxs = np.concatenate(dxs, axis=0).transpose()
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = dxs
@@ -1843,7 +1843,7 @@
         data['pdy'] = data['pdx'] # generalization is out the window!
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -1853,7 +1853,7 @@
     def _add_grid_to_tree(self, tree, grid, fields, zero_out, dls):
         # We build up the fields to add
         if self._weight is None or fields is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -1873,16 +1873,16 @@
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.logical_or.reduce(used_data, self.axis)
+            used_points = np.logical_or.reduce(used_data, self.axis)
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         xind, yind = [arr[used_points].ravel()
-                      for arr in na.indices(full_proj[0].shape)]
+                      for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
+        to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 
@@ -1894,8 +1894,8 @@
         if len(grids_to_initialize) == 0: return
         pbar = get_pbar('Initializing tree % 2i / % 2i' \
                           % (level, self._max_level), len(grids_to_initialize))
-        start_index = na.empty(2, dtype="int64")
-        dims = na.empty(2, dtype="int64")
+        start_index = np.empty(2, dtype="int64")
+        dims = np.empty(2, dtype="int64")
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
         for pi, grid in enumerate(grids_to_initialize):
@@ -1920,7 +1920,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2024,7 +2024,7 @@
         self._max_level = max_level
         self._weight = weight_field
         self.preload_style = preload_style
-        self.func = na.sum # for the future
+        self.func = np.sum # for the future
         self.__retval_coords = {}
         self.__retval_fields = {}
         self.__retval_coarse = {}
@@ -2083,7 +2083,7 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        return np.array(dls), np.array(convs)
 
     def __project_level(self, level, fields):
         grids_to_project = self.source.select_grids(level)
@@ -2112,12 +2112,12 @@
             field_data.append([pi[fine] for pi in self.__retval_fields[grid.id]])
             self.__retval_coords[grid.id] = [pi[coarse] for pi in self.__retval_coords[grid.id]]
             self.__retval_fields[grid.id] = [pi[coarse] for pi in self.__retval_fields[grid.id]]
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
         if self._weight is not None:
             field_data = field_data / coord_data[3,:].reshape((1,coord_data.shape[1]))
         else:
-            field_data *= convs[...,na.newaxis]
+            field_data *= convs[...,np.newaxis]
         mylog.info("Level %s done: %s final", \
                    level, coord_data.shape[1])
         pdx = grids_to_project[0].dds[x_dict[self.axis]] # this is our dl
@@ -2142,7 +2142,7 @@
                 args += self.__retval_coords[grid2.id] + [self.__retval_fields[grid2.id]]
                 args += self.__retval_coords[grid1.id] + [self.__retval_fields[grid1.id]]
                 args.append(1) # Refinement factor
-                args.append(na.ones(args[0].shape, dtype='int64'))
+                args.append(np.ones(args[0].shape, dtype='int64'))
                 kk = CombineGrids(*args)
                 goodI = args[-1].astype('bool')
                 self.__retval_coords[grid2.id] = \
@@ -2169,8 +2169,8 @@
                     # that this complicated rounding is because sometimes
                     # epsilon differences in dds between the grids causes this
                     # to round to up or down from the expected value.
-                    args.append(int(na.rint(grid2.dds / grid1.dds)[0]))
-                    args.append(na.ones(args[0].shape, dtype='int64'))
+                    args.append(int(np.rint(grid2.dds / grid1.dds)[0]))
+                    args.append(np.ones(args[0].shape, dtype='int64'))
                     kk = CombineGrids(*args)
                     goodI = args[-1].astype('bool')
                     self.__retval_coords[grid2.id] = \
@@ -2213,8 +2213,8 @@
                 self.__project_level(level, fields)
             coord_data.append(my_coords)
             field_data.append(my_fields)
-            pdxs.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
-            pdys.append(my_pdx * na.ones(my_coords.shape[1], dtype='float64'))
+            pdxs.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
+            pdys.append(my_pdx * np.ones(my_coords.shape[1], dtype='float64'))
             if self._check_region and False:
                 check=self.__cleanup_level(level - 1)
                 if len(check) > 0: all_data.append(check)
@@ -2225,10 +2225,10 @@
                 del self.__overlap_masks[grid.id]
         mylog.debug("End of projecting level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
-        coord_data = na.concatenate(coord_data, axis=1)
-        field_data = na.concatenate(field_data, axis=1)
-        pdxs = na.concatenate(pdxs, axis=1)
-        pdys = na.concatenate(pdys, axis=1)
+        coord_data = np.concatenate(coord_data, axis=1)
+        field_data = np.concatenate(field_data, axis=1)
+        pdxs = np.concatenate(pdxs, axis=1)
+        pdys = np.concatenate(pdys, axis=1)
         # We now convert to half-widths and center-points
         data = {}
         data['pdx'] = pdxs; del pdxs
@@ -2244,7 +2244,7 @@
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
         data = self.comm.par_combine_object(data, datatype='dict', op='cat')
-        field_data = na.vsplit(data.pop('fields'), len(fields))
+        field_data = np.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
             if self.serialize: self._store_fields(field, self._node_name)
@@ -2260,7 +2260,7 @@
         # in _get_data_from_grid *and* we attempt not to load weight data
         # independently of the standard field data.
         if self._weight is None:
-            weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
+            weight_data = np.ones(grid.ActiveDimensions, dtype='float64')
             if zero_out: weight_data[grid.child_indices] = 0
             masked_data = [fd.astype('float64') * weight_data
                            for fd in self._get_data_from_grid(grid, fields)]
@@ -2278,18 +2278,18 @@
         weight_proj = self.func(weight_data, axis=self.axis)
         if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
             used_data = self._get_points_in_region(grid).astype('bool')
-            used_points = na.where(na.logical_or.reduce(used_data, self.axis))
+            used_points = np.where(np.logical_or.reduce(used_data, self.axis))
         else:
-            used_data = na.array([1.0], dtype='bool')
+            used_data = np.array([1.0], dtype='bool')
             used_points = slice(None)
         if zero_out:
-            subgrid_mask = na.logical_and.reduce(
-                                na.logical_or(grid.child_mask,
+            subgrid_mask = np.logical_and.reduce(
+                                np.logical_or(grid.child_mask,
                                              ~used_data),
                                 self.axis).astype('int64')
         else:
-            subgrid_mask = na.ones(full_proj[0].shape, dtype='int64')
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+            subgrid_mask = np.ones(full_proj[0].shape, dtype='int64')
+        xind, yind = [arr[used_points].ravel() for arr in np.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
@@ -2300,7 +2300,7 @@
 
     def _get_points_in_region(self, grid):
         pointI = self.source._get_point_indices(grid, use_child_mask=False)
-        point_mask = na.zeros(grid.ActiveDimensions)
+        point_mask = np.zeros(grid.ActiveDimensions)
         point_mask[pointI] = 1.0
         if self._field_cuts is not None:
             for cut in self._field_cuts:
@@ -2367,30 +2367,30 @@
         >>> print fproj["Density"]
         """
         AMR2DData.__init__(self, axis, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.dims = na.array([dims]*2)
-        self.ActiveDimensions = na.array([dims]*3, dtype='int32')
+        self.dims = np.array([dims]*2)
+        self.ActiveDimensions = np.array([dims]*3, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
-        self.global_startindex = na.rint((self.left_edge - self.pf.domain_left_edge)
+        self.global_startindex = np.rint((self.left_edge - self.pf.domain_left_edge)
                                          /self.dds).astype('int64')
         self._dls = {}
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
+        if np.any(self.left_edge < self.pf.domain_left_edge) or \
+           np.any(self.right_edge > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids(
                             self.left_edge, self.right_edge)
         else:
             grids,ind = self.pf.hierarchy.get_box_grids(
                             self.left_edge, self.right_edge)
         level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
         self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
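
global_startindex above converts the physical left edge into integer cell coordinates at this level by dividing by the cell width and rounding. A minimal sketch (the edges and dds are illustrative):

    import numpy as np

    domain_left_edge = np.array([0.0, 0.0, 0.0])
    left_edge = np.array([0.25, 0.5, 0.0])
    dds = np.array([1.0 / 16] * 3)   # cell width at this level

    start = np.rint((left_edge - domain_left_edge) / dds).astype('int64')
    # start == [4, 8, 0]: the slab begins 4 cells in along x, 8 along y
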
 
     def _generate_coords(self):
@@ -2398,9 +2398,9 @@
         yax = y_dict[self.axis]
         ci = self.left_edge + self.dds*0.5
         cf = self.left_edge + self.dds*(self.ActiveDimensions-0.5)
-        cx = na.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
-        cy = na.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
-        blank = na.ones( (self.ActiveDimensions[xax],
+        cx = np.mgrid[ci[xax]:cf[xax]:self.ActiveDimensions[xax]*1j]
+        cy = np.mgrid[ci[yax]:cf[yax]:self.ActiveDimensions[yax]*1j]
+        blank = np.ones( (self.ActiveDimensions[xax],
                           self.ActiveDimensions[yax]), dtype='float64')
         self['px'] = cx[None,:] * blank
         self['py'] = cx[:,None] * blank
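
The complex step in np.mgrid (N*1j) requests N evenly spaced samples between the bounds, endpoints included, i.e. the linspace convention rather than an arange step. A one-line check (values illustrative):

    import numpy as np

    assert np.allclose(np.mgrid[0.0:1.0:5j], np.linspace(0.0, 1.0, 5))
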
@@ -2422,7 +2422,7 @@
         if len(fields_to_get) == 0: return
         temp_data = {}
         for field in fields_to_get:
-            self[field] = na.zeros(self.dims, dtype='float64')
+            self[field] = np.zeros(self.dims, dtype='float64')
         dls = self.__setup_dls(fields_to_get)
         for i,grid in enumerate(self._get_grids()):
             mylog.debug("Getting fields from %s", i)
@@ -2483,10 +2483,10 @@
             if ( (i%100) == 0):
                 mylog.info("Working on % 7i / % 7i", i, len(self._grids))
             grid.set_field_parameter("center", self.center)
-            points.append((na.ones(
+            points.append((np.ones(
                 grid.ActiveDimensions,dtype='float64')*grid['dx'])\
                     [self._get_point_indices(grid)])
-            t = na.concatenate([t,points])
+            t = np.concatenate([t,points])
             del points
         self['dx'] = t
         #self['dy'] = t
@@ -2496,8 +2496,8 @@
     @restore_grid_state
     def _generate_grid_coords(self, grid, field=None):
         pointI = self._get_point_indices(grid)
-        dx = na.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
-        tr = na.array([grid['x'][pointI].ravel(), \
+        dx = np.ones(pointI[0].shape[0], 'float64') * grid.dds[0]
+        tr = np.array([grid['x'][pointI].ravel(), \
                 grid['y'][pointI].ravel(), \
                 grid['z'][pointI].ravel(), \
                 grid["RadiusCode"][pointI].ravel(),
@@ -2533,7 +2533,7 @@
                 if self._generate_field(field):
                     continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
-            self[field] = na.concatenate(
+            self[field] = np.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
@@ -2545,21 +2545,21 @@
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
             # int64 -> float64 with the first real set of data
-            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
-                return na.array([f[i,:][pointI] for i in range(3)])
+                return np.array([f[i,:][pointI] for i in range(3)])
             if self._is_fully_enclosed(grid): return grid[field].ravel()
             return grid[field][pointI].ravel()
         if field in self.pf.field_info and self.pf.field_info[field].vector_field:
             pointI = self._get_point_indices(grid)
             f = grid[field]
-            return na.array([f[i,:][pointI] for i in range(3)])
+            return np.array([f[i,:][pointI] for i in range(3)])
         else:
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
-                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+                tr = tr * np.ones(grid.ActiveDimensions, dtype='float64')
             if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
                 and self._is_fully_enclosed(grid):
                 return tr.ravel()
@@ -2579,19 +2579,19 @@
             if grid.has_key(field):
                 new_field = grid[field]
             else:
-                new_field = na.ones(grid.ActiveDimensions, dtype=dtype) * default_val
+                new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
             new_field[pointI] = self[field][i:i+np]
             grid[field] = new_field
             i += np
 
     def _is_fully_enclosed(self, grid):
-        return na.all(self._get_cut_mask)
+        return np.all(self._get_cut_mask)
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.zeros(grid.ActiveDimensions, dtype='bool')
+        k = np.zeros(grid.ActiveDimensions, dtype='bool')
         k = (k | self._get_cut_mask(grid))
         if use_child_mask: k = (k & grid.child_mask)
-        return na.where(k)
+        return np.where(k)
 
     def _get_cut_particle_mask(self, grid):
         if self._is_fully_enclosed(grid):
@@ -2600,9 +2600,9 @@
         return self._get_cut_mask(fake_grid)
 
     def _get_particle_indices(self, grid):
-        k = na.zeros(grid.NumberOfParticles, dtype='bool')
+        k = np.zeros(grid.NumberOfParticles, dtype='bool')
         k = (k | self._get_cut_particle_mask(grid))
-        return na.where(k)
+        return np.where(k)
 
     def cut_region(self, field_cuts):
         """
@@ -2705,16 +2705,16 @@
                 samples.append(svals)
             verts.append(my_verts)
         pb.finish()
-        verts = na.concatenate(verts).transpose()
+        verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
         verts = verts.transpose()
         if sample_values is not None:
-            samples = na.concatenate(samples)
+            samples = np.concatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
         if rescale:
-            mi = na.min(verts, axis=0)
-            ma = na.max(verts, axis=0)
+            mi = np.min(verts, axis=0)
+            ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
@@ -2818,7 +2818,7 @@
         mask = self._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(field)
         if fluxing_field is None:
-            ff = na.ones(vals.shape, dtype="float64")
+            ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
         xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
@@ -2835,10 +2835,10 @@
         them to be plotted.
         """
         if log_space:
-            cons = na.logspace(na.log10(min_val),na.log10(max_val),
+            cons = np.logspace(np.log10(min_val),np.log10(max_val),
                                num_levels+1)
         else:
-            cons = na.linspace(min_val, max_val, num_levels+1)
+            cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
         if cache: cached_fields = defaultdict(lambda: dict())
         else: cached_fields = None
@@ -2867,7 +2867,7 @@
         """
         for grid in self._grids:
             if default_value != None:
-                grid[field] = na.ones(grid.ActiveDimensions)*default_value
+                grid[field] = np.ones(grid.ActiveDimensions)*default_value
             grid[field][self._get_point_indices(grid)] = value
 
     _particle_handler = None
@@ -2951,36 +2951,36 @@
         grid_vals, xi, yi, zi = [], [], [], []
         for grid in self._base_region._grids:
             xit,yit,zit = self._base_region._get_point_indices(grid)
-            grid_vals.append(na.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
+            grid_vals.append(np.ones(xit.shape, dtype='int') * (grid.id-grid._id_offset))
             xi.append(xit)
             yi.append(yit)
             zi.append(zit)
-        grid_vals = na.concatenate(grid_vals)[self._base_indices]
-        grid_order = na.argsort(grid_vals)
+        grid_vals = np.concatenate(grid_vals)[self._base_indices]
+        grid_order = np.argsort(grid_vals)
         # Note: grid_vals is still unordered
-        grid_ids = na.unique(grid_vals)
-        xi = na.concatenate(xi)[self._base_indices][grid_order]
-        yi = na.concatenate(yi)[self._base_indices][grid_order]
-        zi = na.concatenate(zi)[self._base_indices][grid_order]
-        bc = na.bincount(grid_vals)
+        grid_ids = np.unique(grid_vals)
+        xi = np.concatenate(xi)[self._base_indices][grid_order]
+        yi = np.concatenate(yi)[self._base_indices][grid_order]
+        zi = np.concatenate(zi)[self._base_indices][grid_order]
+        bc = np.bincount(grid_vals)
         splits = []
         for i,v in enumerate(bc):
             if v > 0: splits.append(v)
-        splits = na.add.accumulate(splits)
-        xis, yis, zis = [na.array_split(aa, splits) for aa in [xi,yi,zi]]
+        splits = np.add.accumulate(splits)
+        xis, yis, zis = [np.array_split(aa, splits) for aa in [xi,yi,zi]]
         self._indices = {}
         h = self._base_region.pf.h
         for grid_id, x, y, z in itertools.izip(grid_ids, xis, yis, zis):
             # grid_id needs no offset
             ll = h.grids[grid_id].ActiveDimensions.prod() \
-               - (na.logical_not(h.grids[grid_id].child_mask)).sum()
+               - (np.logical_not(h.grids[grid_id].child_mask)).sum()
             # This means we're completely enclosed, except for child masks
             if x.size == ll:
                 self._indices[grid_id] = None
             else:
                 # This will slow things down a bit, but conserve memory
                 self._indices[grid_id] = \
-                    na.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
+                    np.zeros(h.grids[grid_id].ActiveDimensions, dtype='bool')
                 self._indices[grid_id][(x,y,z)] = True
         self._grids = h.grids[self._indices.keys()]
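
The hunk above groups flat cell indices by owning grid: argsort orders them by grid id, bincount gives the per-grid counts, and accumulating the nonzero counts yields split points for np.array_split. A self-contained sketch of the same pattern (grid_vals and xi are illustrative):

    import numpy as np

    grid_vals = np.array([2, 0, 2, 1, 0, 2])   # owning grid of each cell
    xi = np.array([10, 11, 12, 13, 14, 15])    # one index array per axis

    order = np.argsort(grid_vals)
    grid_ids = np.unique(grid_vals)             # [0, 1, 2]
    xi = xi[order]

    counts = np.bincount(grid_vals)             # hits per grid id
    splits = np.add.accumulate([c for c in counts if c > 0])
    per_grid = np.array_split(xi, splits)       # trailing split is empty
    # zip(grid_ids, per_grid) pairs each grid with its cell indices
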
 
@@ -2992,16 +2992,16 @@
         return False
 
     def _get_cut_mask(self, grid):
-        cm = na.zeros(grid.ActiveDimensions, dtype='bool')
+        cm = np.zeros(grid.ActiveDimensions, dtype='bool')
         cm[self._get_point_indices(grid, False)] = True
         return cm
 
-    __empty_array = na.array([], dtype='bool')
+    __empty_array = np.array([], dtype='bool')
     def _get_point_indices(self, grid, use_child_mask=True):
         # Yeah, if it's not true, we don't care.
         tr = self._indices.get(grid.id-grid._id_offset, self.__empty_array)
-        if tr is None: tr = na.where(grid.child_mask)
-        else: tr = na.where(tr)
+        if tr is None: tr = np.where(grid.child_mask)
+        else: tr = np.where(tr)
         return tr
 
     def __repr__(self):
@@ -3018,7 +3018,7 @@
             grid = self.pf.h.grids[g]
             if g in other._indices and g in self._indices:
                 # We now join the indices
-                ind = na.zeros(grid.ActiveDimensions, dtype='bool')
+                ind = np.zeros(grid.ActiveDimensions, dtype='bool')
                 ind[self._indices[g]] = True
                 ind[other._indices[g]] = True
                 if ind.prod() == grid.ActiveDimensions.prod(): ind = None
@@ -3056,7 +3056,7 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        point_mask = na.ones(grid.ActiveDimensions, dtype='bool')
+        point_mask = np.ones(grid.ActiveDimensions, dtype='bool')
         point_mask *= self._base_region._get_cut_mask(grid)
         for cut in self._field_cuts:
             point_mask *= eval(cut)
@@ -3076,35 +3076,35 @@
         within the cylinder will be selected.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._norm_vec = na.array(normal)/na.sqrt(na.dot(normal,normal))
+        self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
         self._height = fix_length(height, self.pf)
         self._radius = fix_length(radius, self.pf)
-        self._d = -1.0 * na.dot(self._norm_vec, self.center)
+        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._refresh_data()
 
     def _get_list_of_grids(self):
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((self.pf.h.grid_corners -
+        D = np.sqrt(np.sum((self.pf.h.grid_corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
+        R = np.sqrt(D**2.0-H**2.0)
         self._grids = self.hierarchy.grids[
-            ( (na.any(na.abs(H)<self._height,axis=0))
-            & (na.any(R<self._radius,axis=0)
-            & (na.logical_not((na.all(H>0,axis=0) | (na.all(H<0, axis=0)))) )
+            ( (np.any(np.abs(H)<self._height,axis=0))
+            & (np.any(R<self._radius,axis=0)
+            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
             ) ) ]
         self._grids = self.hierarchy.grids
 
     def _is_fully_enclosed(self, grid):
         corners = grid._corners.reshape((8,3,1))
-        H = na.sum(self._norm_vec.reshape((1,3,1)) * corners,
+        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
                    axis=1) + self._d
-        D = na.sqrt(na.sum((corners -
+        D = np.sqrt(np.sum((corners -
                            self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = na.sqrt(D**2.0-H**2.0)
-        return (na.all(na.abs(H) < self._height, axis=0) \
-            and na.all(R < self._radius, axis=0))
+        R = np.sqrt(D**2.0-H**2.0)
+        return (np.all(np.abs(H) < self._height, axis=0) \
+            and np.all(R < self._radius, axis=0))
 
     @cache_mask
     def _get_cut_mask(self, grid):
@@ -3115,13 +3115,13 @@
               + grid['y'] * self._norm_vec[1] \
               + grid['z'] * self._norm_vec[2] \
               + self._d
-            d = na.sqrt(
+            d = np.sqrt(
                 (grid['x'] - self.center[0])**2.0
               + (grid['y'] - self.center[1])**2.0
               + (grid['z'] - self.center[2])**2.0
                 )
-            r = na.sqrt(d**2.0-h**2.0)
-            cm = ( (na.abs(h) <= self._height)
+            r = np.sqrt(d**2.0-h**2.0)
+            cm = ( (np.abs(h) <= self._height)
                  & (r <= self._radius))
         return cm
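
One observation on the disk test in this hunk: it is plain vector algebra.  With
a unit normal n through the center c, a point p sits at signed height
h = n . (p - c) above the midplane, its in-plane radius is
r = sqrt(|p - c|^2 - h^2), and a cell is kept when |h| <= height and
r <= radius.  A minimal standalone sketch of the same test in plain NumPy
(disk_mask and its inputs are made up for illustration, not yt's API):

    import numpy as np

    def disk_mask(points, center, normal, radius, height):
        # points: (N, 3) cell centers; all quantities in the same units
        n = np.asarray(normal, dtype='float64')
        n = n / np.sqrt(np.dot(n, n))        # unit normal
        rel = np.asarray(points) - np.asarray(center)
        h = np.dot(rel, n)                   # signed height above the midplane
        r = np.sqrt(np.maximum((rel ** 2).sum(axis=1) - h ** 2, 0.0))
        return (np.abs(h) <= height) & (r <= radius)

    pts = np.random.random((1000, 3))
    inside = disk_mask(pts, center=[0.5, 0.5, 0.5],
                       normal=(0.0, 0.0, 1.0), radius=0.25, height=0.1)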
 
@@ -3138,8 +3138,8 @@
         describe the box.  No checks are done to ensure that the box satisfies
         a right-hand rule, but if it doesn't, behavior is undefined.
         """
-        self.origin = na.array(origin)
-        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.origin = np.array(origin)
+        self.box_vectors = np.array(box_vectors, dtype='float64')
         self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
         center = origin + 0.5*self.box_vectors.sum(axis=0)
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
@@ -3150,11 +3150,11 @@
         xv = self.box_vectors[0,:]
         yv = self.box_vectors[1,:]
         zv = self.box_vectors[2,:]
-        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
-        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
-        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
-        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = na.linalg.pinv(self._rot_mat)
+        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
+        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
+        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
+        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = np.linalg.pinv(self._rot_mat)
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
@@ -3172,7 +3172,7 @@
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
             
 
@@ -3185,7 +3185,7 @@
     def _get_cut_mask(self, grid):
         if self._is_fully_enclosed(grid):
             return True
-        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
                               self._rot_mat, grid.LeftEdge, 
                               grid.RightEdge, grid.dds, pm, 0)
@@ -3228,7 +3228,7 @@
                                                            self.right_edge)
 
     def _is_fully_enclosed(self, grid):
-        return na.all( (grid._corners <= self.right_edge)
+        return np.all( (grid._corners <= self.right_edge)
                      & (grid._corners >= self.left_edge))
 
     @cache_mask
@@ -3282,10 +3282,10 @@
 
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
         self._refresh_data()
-        self.offsets = (na.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
+        self.offsets = (np.mgrid[-1:1:3j,-1:1:3j,-1:1:3j] * \
                         (self.pf.domain_right_edge -
                          self.pf.domain_left_edge)[:,None,None,None])\
                        .transpose().reshape(27,3) # cached and in order
@@ -3300,7 +3300,7 @@
                            self.left_edge[1]+off_y,self.left_edge[2]+off_z]
             region_right = [self.right_edge[0]+off_x,
                             self.right_edge[1]+off_y,self.right_edge[2]+off_z]
-            if (na.all((grid._corners <= region_right) &
+            if (np.all((grid._corners <= region_right) &
                        (grid._corners >= region_left))):
                 return True
         return False
@@ -3310,7 +3310,7 @@
         if self._is_fully_enclosed(grid):
             return True
         else:
-            cm = na.zeros(grid.ActiveDimensions,dtype='bool')
+            cm = np.zeros(grid.ActiveDimensions,dtype='bool')
             dxp, dyp, dzp = self._dx_pad * grid.dds
             for off_x, off_y, off_z in self.offsets:
                 cm = cm | ( (grid['x'] - dxp + off_x < self.right_edge[0])
@@ -3350,7 +3350,7 @@
         Child cells are not returned.
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
-        self._grids = na.array(grid_list)
+        self._grids = np.array(grid_list)
         self.grid_list = self._grids
 
     def _get_list_of_grids(self):
@@ -3361,13 +3361,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 class AMRMaxLevelCollection(AMR3DData):
@@ -3394,13 +3394,13 @@
 
     @cache_mask
     def _get_cut_mask(self, grid):
-        return na.ones(grid.ActiveDimensions, dtype='bool')
+        return np.ones(grid.ActiveDimensions, dtype='bool')
 
     def _get_point_indices(self, grid, use_child_mask=True):
-        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        k = np.ones(grid.ActiveDimensions, dtype='bool')
         if use_child_mask and grid.Level < self.max_level:
             k[grid.child_indices] = False
-        pointI = na.where(k == True)
+        pointI = np.where(k == True)
         return pointI
 
 
@@ -3441,14 +3441,14 @@
         # Now we sort by level
         grids = grids.tolist()
         grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
-        self._grids = na.empty(len(grids), dtype='object')
+        self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = na.abs(grid._corners - self.center)
-        r = na.minimum(r, na.abs(self.DW[None,:]-r))
-        corner_radius = na.sqrt((r**2.0).sum(axis=1))
-        return na.all(corner_radius <= self.radius)
+        r = np.abs(grid._corners - self.center)
+        r = np.minimum(r, np.abs(self.DW[None,:]-r))
+        corner_radius = np.sqrt((r**2.0).sum(axis=1))
+        return np.all(corner_radius <= self.radius)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3477,7 +3477,7 @@
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
-        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
@@ -3488,12 +3488,12 @@
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
-        t1 = na.arctan(e0[1] / e0[0])
+        t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
         RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
         r1 = (e0 * RZ).sum(axis = 1)
         # find the t2 angle needed to rotate about y axis to align e0 to x
-        t2 = na.arctan(-r1[2] / r1[0])
+        t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
         given the tilt about the x axis when e0 was aligned 
@@ -3505,7 +3505,7 @@
         e1 = ((0, 1, 0) * RX).sum(axis = 1)
         e1 = (e1 * RY).sum(axis = 1)
         e1 = (e1 * RZ).sum(axis = 1)
-        e2 = na.cross(e0, e1)
+        e2 = np.cross(e0, e1)
 
         self._e1 = e1
         self._e2 = e2
@@ -3535,7 +3535,7 @@
                                   x.LeftEdge[0], \
                                   x.LeftEdge[1], \
                                   x.LeftEdge[2]))
-        self._grids = na.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype = 'object')
 
     def _is_fully_enclosed(self, grid):
         """
@@ -3545,18 +3545,18 @@
         vr = (grid._corners - self.center)
         # 3 possible cases of locations taking periodic BC into account
         # just listing the components, find smallest later
-        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
         # each vrdote#_2 below takes the product of the vr components with e#,
         # squares the results,
         # keeps the smallest of the three periodic cases,
         # and sums over the components
-        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        return na.all(vrdote0_2 / self._A**2 + \
+        return np.all(vrdote0_2 / self._A**2 + \
                       vrdote1_2 / self._B**2 + \
                       vrdote2_2 / self._C**2 <=1.0)
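
The three-row dotarr above is the minimum-image convention: for every
displacement component the candidates vr, vr + DW, and vr - DW are compared and
the smallest in magnitude wins, so periodic neighbors are never farther apart
than half the domain.  The same trick in isolation (minimum_image is a
hypothetical helper; domain_width stands in for self.DW):

    import numpy as np

    def minimum_image(disp, domain_width):
        # candidates for each component: as-is, shifted up, shifted down
        cases = np.array([disp, disp + domain_width, disp - domain_width])
        pick = np.abs(cases).argmin(axis=0)  # index of smallest magnitude
        return np.choose(pick, cases)

    print(minimum_image(np.array([0.9, -0.8, 0.1]), 1.0))  # [-0.1  0.2  0.1]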
 
@@ -3572,21 +3572,21 @@
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # needed to account for root grid tiles that are not cubes
-        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
             # cases to take into account periodic BC
-            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
             # find which of the 3 cases is smallest in magnitude
-            index = na.abs(case).argmin(axis = 0)
+            index = np.abs(case).argmin(axis = 0)
             # restrict distance to only the smallest cases
-            vec = na.choose(index, case)
+            vec = np.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e0[i], \
+            dot_evec += np.array([vec * self._e0[i], \
                                   vec * self._e1[i], \
                                   vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
@@ -3627,22 +3627,22 @@
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
+        self.left_edge = np.array(left_edge)
         self.level = level
         self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = na.array(dims,dtype='int32')
+        self.ActiveDimensions = np.array(dims,dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar
-        self.global_startindex = na.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint((self.left_edge-self.pf.domain_left_edge)/self.dds).astype('int64')
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._refresh_data()
 
     def _get_list_of_grids(self, buffer = 0.0):
         if self._grids is not None: return
-        if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + buffer > self.pf.domain_right_edge):
+        if np.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
+           np.any(self.right_edge + buffer > self.pf.domain_right_edge):
             grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
                             self.right_edge + buffer, self.level)
@@ -3650,14 +3650,14 @@
             grids,ind = self.pf.hierarchy.get_box_grids_below_level(
                 self.left_edge - buffer,
                 self.right_edge + buffer, self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        sort_ind = np.argsort(self.pf.h.grid_levels.ravel()[ind])
         self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
+        self['dx'] = self.dds[0] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dy'] = self.dds[1] * np.ones(self.ActiveDimensions, dtype='float64')
+        self['dz'] = self.dds[2] * np.ones(self.ActiveDimensions, dtype='float64')
 
     def get_data(self, fields=None):
         if self._grids is None:
@@ -3677,7 +3677,7 @@
                 except NeedsOriginalGrid, ngt_exception:
                     pass
             obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
+            self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
         mylog.debug("Getting fields %s from %s possible grids",
                    obtain_fields, len(self._grids))
@@ -3689,9 +3689,9 @@
             count -= self._get_data_from_grid(grid, obtain_fields)
             if count <= 0: break
         if self._use_pbar: pbar.finish()
-        if count > 0 or na.any(self[obtain_fields[0]] == -999):
+        if count > 0 or np.any(self[obtain_fields[0]] == -999):
             # and self.dx < self.hierarchy.grids[0].dx:
-            n_bad = na.where(self[obtain_fields[0]]==-999)[0].size
+            n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
             
@@ -3737,7 +3737,7 @@
         g_fields = []
         for field in fields:
             if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -3832,7 +3832,7 @@
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-                if na.any(self[field] == -999):
+                if np.any(self[field] == -999):
                     # and self.dx < self.hierarchy.grids[0].dx:
                     n_bad = (self[field]==-999).sum()
                     mylog.error("Covering problem: %s cells are uncovered", n_bad)
@@ -3846,35 +3846,35 @@
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
-        self.domain_width = na.rint((self.pf.domain_right_edge -
+        self.global_startindex = np.rint(LL / dx).astype('int64') - 1
+        self.domain_width = np.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
-            self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
-            idims = na.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
+            self.global_startindex = np.array(np.floor(LL/ dx), dtype='int64')
+            idims = np.rint((self.ActiveDimensions*self.dds)/dx).astype('int64')
             fields = ensure_list(fields)
             for field in fields:
-                self.field_data[field] = na.zeros(idims,dtype='float64')-999
+                self.field_data[field] = np.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
+        dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
 
         for field in fields:
-            output_field = na.zeros(output_dims, dtype="float64")
+            output_field = np.zeros(output_dims, dtype="float64")
             output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
@@ -3944,7 +3944,7 @@
             self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = na.unique(self._all_regions)
+        self._all_regions = np.unique(self._all_regions)
     
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
@@ -3969,7 +3969,7 @@
                 # The whole grid is in the hybrid region if a) its cut_mask
                 # in the original region is identical to the new one and b)
                 # the original region cut_mask is all ones.
-                if (local == na.bitwise_and(overall, local)).all() and \
+                if (local == np.bitwise_and(overall, local)).all() and \
                         (local == True).all():
                     self._all_overlap.append(grid)
                     continue
@@ -3997,7 +3997,7 @@
         return (grid in self._all_overlap)
 
     def _get_list_of_grids(self):
-        self._grids = na.array(self._some_overlap + self._all_overlap,
+        self._grids = np.array(self._some_overlap + self._all_overlap,
             dtype='object')
 
     def _get_cut_mask(self, grid, field=None):
@@ -4054,13 +4054,13 @@
             if i == 0: continue
             if item == "AND":
                 # So, the next item in level_masks we want to AND.
-                na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
             if item == "NOT":
                 # It's convenient to remember that NOT == AND NOT
-                na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+                np.bitwise_and(this_cut_mask, np.invert(level_masks[i+1]),
                     this_cut_mask)
             if item == "OR":
-                na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+                np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
         if not isinstance(grid, FakeGridForParticles):
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
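
The boolean-region machinery that closes this file reduces each sub-region to a
mask and folds the masks together in place with np.bitwise_and / np.bitwise_or,
treating NOT as AND-NOT.  A toy version of that fold, sketched outside yt with
fabricated masks (combine_masks is not a yt function):

    import numpy as np

    def combine_masks(masks, ops):
        # masks: boolean arrays; ops: e.g. ["AND", "NOT"], applied in order
        out = masks[0].copy()
        for op, m in zip(ops, masks[1:]):
            if op == "AND":
                np.bitwise_and(out, m, out)
            elif op == "NOT":                # NOT == AND NOT
                np.bitwise_and(out, np.invert(m), out)
            elif op == "OR":
                np.bitwise_or(out, m, out)
        return out

    a, b, c = (np.random.random((4, 4)) > 0.5 for _ in range(3))
    print(combine_masks([a, b, c], ["AND", "OR"]))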


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -100,7 +100,7 @@
             if not iterable(rv): rv = (rv,)
             for i in range(self.n_ret): self.retvals[i].append(rv[i])
             g.clear_data()
-        self.retvals = [na.array(self.retvals[i]) for i in range(self.n_ret)]
+        self.retvals = [np.array(self.retvals[i]) for i in range(self.n_ret)]
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
@@ -110,7 +110,7 @@
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
-            data = na.array(my_list).transpose()
+            data = np.array(my_list).transpose()
             rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
@@ -185,7 +185,7 @@
 
     return x,y,z, den
 def _combCenterOfMass(data, x,y,z, den):
-    return na.array([x.sum(), y.sum(), z.sum()])/den.sum()
+    return np.array([x.sum(), y.sum(), z.sum()])/den.sum()
 add_quantity("CenterOfMass", function=_CenterOfMass,
              combine_function=_combCenterOfMass, n_ret = 4)
 
@@ -218,7 +218,7 @@
     xv = xv.sum()/w
     yv = yv.sum()/w
     zv = zv.sum()/w
-    return na.array([xv, yv, zv])
+    return np.array([xv, yv, zv])
 add_quantity("BulkVelocity", function=_BulkVelocity,
              combine_function=_combBulkVelocity, n_ret=4)
 
@@ -249,9 +249,9 @@
     return [j_mag]
 
 def _combAngularMomentumVector(data, j_mag):
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     L_vec = j_mag.sum(axis=0)
-    L_vec_norm = L_vec / na.sqrt((L_vec**2.0).sum())
+    L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
     return L_vec_norm
 add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
              combine_function=_combAngularMomentumVector, n_ret=1)
@@ -268,17 +268,17 @@
     amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
     amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
     amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
     weight=data["CellMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
 def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
     # Because it's a vector field, we have to ensure we have enough dimensions
-    if len(j_mag.shape) < 2: j_mag = na.expand_dims(j_mag, 0)
+    if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
     W = weight.sum()
     M = m_enc.sum()
-    J = na.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
-    E = na.sqrt(e_term_pre.sum()/W)
+    J = np.sqrt(((j_mag.sum(axis=0))**2.0).sum())/W
+    E = np.sqrt(e_term_pre.sum()/W)
     G = 6.67e-8 # cm^3 g^-1 s^-2
     spin = J * E / (M*1.989e33*G)
     return spin
@@ -292,11 +292,11 @@
     """
     m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
     amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
-    if amx.size == 0: return (na.zeros((3,), dtype='float64'), m_enc, 0, 0)
+    if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
     amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
     amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
-    j_mag = na.array([amx.sum(), amy.sum(), amz.sum()])
-    e_term_pre = na.sum(data["ParticleMassMsun"]
+    j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+    e_term_pre = np.sum(data["ParticleMassMsun"]
                        *data["ParticleVelocityMagnitude"]**2.0)
     weight=data["ParticleMassMsun"].sum()
     return j_mag, m_enc, e_term_pre, weight
@@ -360,15 +360,15 @@
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
         kinetic += thermal
     if periodic_test:
-        kinetic = na.ones_like(kinetic)
+        kinetic = np.ones_like(kinetic)
     # Gravitational potential energy
     # We only divide once here because we have velocity in cgs, but radius is
     # in code units.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / na.array(data.pf.domain_dimensions)
+    two_root = 2. / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
-    periodic = na.array([0., 0., 0.])
+    periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
         sorted = data[dim][data[dim].argsort()]
         # If two adjacent values are different by (more than) two root grid
@@ -380,7 +380,7 @@
             # define the gap from the right boundary, which we'll use for the
             # periodic adjustment later.
             sel = (diff >= two_root[i])
-            index = na.min(na.nonzero(sel))
+            index = np.min(np.nonzero(sel))
             # The last addition term below ensures that the data makes a full
             # wrap-around.
             periodic[i] = data.pf.domain_right_edge[i] - sorted[index + 1] + \
@@ -402,26 +402,26 @@
             local_data[dim] += periodic[i]
             local_data[dim] %= domain_period[i]
     if periodic_test:
-        local_data["CellMass"] = na.ones_like(local_data["CellMass"])
+        local_data["CellMass"] = np.ones_like(local_data["CellMass"])
     import time
     t1 = time.time()
     if treecode:
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./na.array(data.pf.domain_dimensions).astype('float64')
-        left = min([na.amin(local_data['x']), na.amin(local_data['y']),
-            na.amin(local_data['z'])])
-        right = max([na.amax(local_data['x']), na.amax(local_data['y']),
-            na.amax(local_data['z'])])
-        cover_min = na.array([left, left, left])
-        cover_max = na.array([right, right, right])
+        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        left = min([np.amin(local_data['x']), np.amin(local_data['y']),
+            np.amin(local_data['z'])])
+        right = max([np.amax(local_data['x']), np.amax(local_data['y']),
+            np.amax(local_data['z'])])
+        cover_min = np.array([left, left, left])
+        cover_max = np.array([right, right, right])
         # Fix the coverage to match the root grid cell left
         # edges for making indices.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * na.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * na.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
+        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -429,12 +429,12 @@
         #print 'here', cover_ActiveDimensions
         # Now discover what levels this data comes from, not assuming
         # symmetry.
-        dxes = na.unique(data['dx']) # unique returns a sorted array,
-        dyes = na.unique(data['dy']) # so these will all have the same
-        dzes = na.unique(data['dz']) # order.
+        dxes = np.unique(data['dx']) # unique returns a sorted array,
+        dyes = np.unique(data['dy']) # so these will all have the same
+        dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
         dx = 1./data.pf.domain_dimensions[0]
-        levels = (na.log(dx / dxes) / na.log(data.pf.refine_by)).astype('int')
+        levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]
         dxes = dxes[lsort]
@@ -447,9 +447,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = na.array([local_data["CellMass"][sel]], order='F')
+	    vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               na.ones_like(thisx).astype('float64'), treecode = 1)
+               np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)
@@ -484,7 +484,7 @@
     m = (data['CellMass'] * mass_scale_factor).astype('float32')
     assert(m.size > bsize)
 
-    gsize=int(na.ceil(float(m.size)/bsize))
+    gsize=int(np.ceil(float(m.size)/bsize))
     assert(gsize > 16)
 
     # Now the tedious process of rescaling our values...
@@ -492,7 +492,7 @@
     x = ((data['x'] - data['x'].min()) * length_scale_factor).astype('float32')
     y = ((data['y'] - data['y'].min()) * length_scale_factor).astype('float32')
     z = ((data['z'] - data['z'].min()) * length_scale_factor).astype('float32')
-    p = na.zeros(z.shape, dtype='float32')
+    p = np.zeros(z.shape, dtype='float32')
     
     x_gpu = cuda.mem_alloc(x.size * x.dtype.itemsize)
     y_gpu = cuda.mem_alloc(y.size * y.dtype.itemsize)
@@ -569,7 +569,7 @@
          block=(bsize,1,1), grid=(gsize, gsize), time_kernel=True)
     cuda.memcpy_dtoh(p, p_gpu)
     p1 = p.sum()
-    if na.any(na.isnan(p)): raise ValueError
+    if np.any(np.isnan(p)): raise ValueError
     return p1 * (length_scale_factor / (mass_scale_factor**2.0))
 
 def _Extrema(data, fields, non_zero = False, filter=None):
@@ -613,9 +613,9 @@
                 maxs.append(-1e90)
     return len(fields), mins, maxs
 def _combExtrema(data, n_fields, mins, maxs):
-    mins, maxs = na.atleast_2d(mins, maxs)
+    mins, maxs = np.atleast_2d(mins, maxs)
     n_fields = mins.shape[1]
-    return [(na.min(mins[:,i]), na.max(maxs[:,i])) for i in range(n_fields)]
+    return [(np.min(mins[:,i]), np.max(maxs[:,i])) for i in range(n_fields)]
 add_quantity("Extrema", function=_Extrema, combine_function=_combExtrema,
              n_ret=3)
 
@@ -644,14 +644,14 @@
     """
     ma, maxi, mx, my, mz, mg = -1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        maxi = na.argmax(data[field])
+        maxi = np.argmax(data[field])
         ma = data[field][maxi]
         mx, my, mz = [data[ax][maxi] for ax in 'xyz']
         mg = data["GridIndices"][maxi]
     return (ma, maxi, mx, my, mz, mg)
 def _combMaxLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmax(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmax(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MaxLocation", function=_MaxLocation,
              combine_function=_combMaxLocation, n_ret = 6)
@@ -663,14 +663,14 @@
     """
     ma, mini, mx, my, mz, mg = 1e90, -1, -1, -1, -1, -1
     if data[field].size > 0:
-        mini = na.argmin(data[field])
+        mini = np.argmin(data[field])
         ma = data[field][mini]
         mx, my, mz = [data[ax][mini] for ax in 'xyz']
         mg = data["GridIndices"][mini]
     return (ma, mini, mx, my, mz, mg)
 def _combMinLocation(data, *args):
-    args = [na.atleast_1d(arg) for arg in args]
-    i = na.argmin(args[0]) # ma is arg[0]
+    args = [np.atleast_1d(arg) for arg in args]
+    i = np.argmin(args[0]) # ma is arg[0]
     return [arg[i] for arg in args]
 add_quantity("MinLocation", function=_MinLocation,
              combine_function=_combMinLocation, n_ret = 6)
@@ -691,8 +691,8 @@
         totals.append(data[field].sum())
     return len(fields), totals
 def _combTotalQuantity(data, n_fields, totals):
-    totals = na.atleast_2d(totals)
+    totals = np.atleast_2d(totals)
     n_fields = totals.shape[1]
-    return [na.sum(totals[:,i]) for i in range(n_fields)]
+    return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
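
A pattern worth noting in derived_quantities.py: every quantity is a map/reduce
pair, with the per-grid function returning partial results and the _comb*
function reducing them once at the end, which is what lets the same quantity
run lazily and in parallel.  A toy CenterOfMass in that style (standalone
NumPy with fabricated chunk data; the names are made up):

    import numpy as np

    def com_chunk(x, y, z, mass):
        # partial sums for one chunk: mass-weighted coordinates, total mass
        return (x * mass).sum(), (y * mass).sum(), (z * mass).sum(), mass.sum()

    def com_combine(partials):
        # reduce the per-chunk tuples exactly once
        mx, my, mz, m = np.array(partials).T
        return np.array([mx.sum(), my.sum(), mz.sum()]) / m.sum()

    parts = [com_chunk(*np.random.random((4, 16))) for _ in range(3)]
    print(com_combine(parts))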


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -30,7 +30,7 @@
 import copy
 import itertools
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -151,8 +151,8 @@
         self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
-        self.dds = na.ones(3, "float64")
-        self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
+        self.dds = np.ones(3, "float64")
+        self['dx'] = self['dy'] = self['dz'] = np.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
 
@@ -161,8 +161,8 @@
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
-            pf.domain_left_edge = na.zeros(3, 'float64')
-            pf.domain_right_edge = na.ones(3, 'float64')
+            pf.domain_left_edge = np.zeros(3, 'float64')
+            pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
 
@@ -180,12 +180,12 @@
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd, nd, nd), dtype='float64')
-                + 1e-4*na.random.random((nd, nd, nd)))
+                lambda: np.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*np.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd * nd * nd), dtype='float64')
-                + 1e-4*na.random.random((nd * nd * nd)))
+                lambda: np.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*np.random.random((nd * nd * nd)))
 
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
@@ -215,13 +215,13 @@
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
-            return na.ones(self.NumberOfParticles)
+            return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
-            return na.random.random(3) * 1e-2
+            return np.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
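
The defaultdict subclass patched here is yt's field detector: it manufactures
plausible array data for any key a derived-field function touches, so the
field's dependencies can be recorded without reading real data.  A
stripped-down sketch of the idea (FieldProbe and _kinetic are hypothetical
names, not yt's API):

    import numpy as np
    from collections import defaultdict

    class FieldProbe(defaultdict):
        """Hands back dummy data for any field, recording what was asked."""
        def __init__(self, nd=16):
            # ones plus a little noise keeps arithmetic on the fields finite
            defaultdict.__init__(self, lambda: np.ones((nd, nd, nd))
                                 + 1e-4 * np.random.random((nd, nd, nd)))
            self.requested = []

        def __missing__(self, key):
            self.requested.append(key)
            return defaultdict.__missing__(self, key)

    def _kinetic(data):
        return 0.5 * data["Density"] * data["VelocityMagnitude"] ** 2

    probe = FieldProbe()
    _kinetic(probe)
    print(probe.requested)   # ['Density', 'VelocityMagnitude']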


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -27,7 +27,7 @@
 import pdb
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
@@ -79,11 +79,11 @@
         if self.Parent == None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
 
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+                       np.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
         self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -184,15 +184,15 @@
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
-                    self[field] = na.array([],dtype='int64')
+                    self[field] = np.array([],dtype='int64')
                     return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
-                    self[field] = na.multiply(temp, conv_factor, temp)
+                    self[field] = np.multiply(temp, conv_factor, temp)
                 except self.hierarchy.io._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].not_in_all:
-                            self[field] = na.zeros(self.ActiveDimensions, dtype='float64')
+                            self[field] = np.zeros(self.ActiveDimensions, dtype='float64')
                         else:
                             raise
                     else: raise
@@ -209,14 +209,14 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
+        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
-        return na.array([ # Unroll!
+        return np.array([ # Unroll!
             [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
             [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
@@ -237,9 +237,9 @@
         x = x_dict[axis]
         y = y_dict[axis]
         cond = self.RightEdge[x] >= LE[:,x]
-        cond = na.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = na.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = na.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
+        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
+        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
     def __repr__(self):
@@ -278,19 +278,19 @@
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
-        return na.prod(self.ActiveDimensions)
+        return np.prod(self.ActiveDimensions)
 
     def find_max(self, field):
         """ Returns value, index of maximum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmax()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
         """ Returns value, index of minimum value of *field* in this grid. """
         coord1d = (self[field] * self.child_mask).argmin()
-        coord = na.unravel_index(coord1d, self[field].shape)
+        coord = np.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
@@ -369,8 +369,8 @@
     def __fill_child_mask(self, child, mask, tofill):
         rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi / rf - gi)
-        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -383,7 +383,7 @@
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = na.ones(self.ActiveDimensions, 'int32')
+        self._child_mask = np.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
         if self.OverlappingSiblings is not None:
@@ -398,7 +398,7 @@
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
+        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
@@ -425,8 +425,8 @@
         Creates self.coords, which is of dimensions (3, ActiveDimensions)
 
         """
-        ind = na.indices(self.ActiveDimensions)
-        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        ind = np.indices(self.ActiveDimensions)
+        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
         self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
@@ -462,7 +462,7 @@
         return cube
 
     def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
             of = self[field]
@@ -474,9 +474,9 @@
             new_field[1:,:-1,1:] += of
             new_field[1:,1:,:-1] += of
             new_field[1:,1:,1:] += of
-            na.multiply(new_field, 0.125, new_field)
+            np.multiply(new_field, 0.125, new_field)
             if self.pf.field_info[field].take_log:
-                new_field = na.log10(new_field)
+                new_field = np.log10(new_field)
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
@@ -486,17 +486,17 @@
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
 
             if self.pf.field_info[field].take_log:
-                na.power(10.0, new_field, new_field)
+                np.power(10.0, new_field, new_field)
         else:
             cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
+            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            np.multiply(new_field, 0.125, new_field)
 
         return new_field
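
At its core, get_vertex_centered_data above is an eight-way shifted add: each
vertex takes the mean of the eight cell-centered values around it, hence the
factor of 0.125.  The interior part in isolation (vertex_centered is a made-up
name; ghost zones and the take_log branch are omitted, and the input is
assumed to already include one ghost layer):

    import numpy as np

    def vertex_centered(cell):
        # cell: cell-centered values with ghosts; the corner array has one
        # fewer point per axis
        v = np.zeros(np.array(cell.shape) - 1)
        for dx in (0, 1):
            for dy in (0, 1):
                for dz in (0, 1):
                    v += cell[dx:dx + v.shape[0],
                              dy:dy + v.shape[1],
                              dz:dz + v.shape[2]]
        return v * 0.125     # mean of the 8 cells touching each vertex

    print(vertex_centered(np.random.random((9, 9, 9))).shape)   # (8, 8, 8)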


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import string, re, gc, time, cPickle, pdb
 import weakref
 
@@ -116,11 +116,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _setup_classes(self, dd):
         # Called by subclass
@@ -172,7 +172,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -361,13 +361,13 @@
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
         self.level_stats['numcells'] = [0 for i in range(MAXLEVEL)]
         for level in xrange(self.max_level+1):
-            self.level_stats[level]['numgrids'] = na.sum(self.grid_levels == level)
+            self.level_stats[level]['numgrids'] = np.sum(self.grid_levels == level)
             li = (self.grid_levels[:,0] == level)
             self.level_stats[level]['numcells'] = self.grid_dimensions[li,:].prod(axis=1).sum()
 
     @property
     def grid_corners(self):
-        return na.array([
+        return np.array([
           [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
           [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
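
The hand-unrolled grid_corners table (truncated by the hunk above) can
equivalently be generated by iterating over left/right choices per axis.  A
sketch that produces the same (8, 3, ngrids) shape, though in an arbitrary
corner order rather than yt's hand-picked one:

    import itertools
    import numpy as np

    def grid_corners(left, right):
        # left, right: (ngrids, 3) edge arrays -> (8, 3, ngrids) corner table
        edges = np.array([left, right])      # (2, ngrids, 3)
        return np.array([[edges[i, :, 0], edges[j, :, 1], edges[k, :, 2]]
                         for i, j, k in itertools.product((0, 1), repeat=3)])

    left = np.zeros((5, 3))
    right = np.ones((5, 3))
    print(grid_corners(left, right).shape)   # (8, 3, 5)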


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.lib import \
@@ -38,15 +38,15 @@
         along *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        na.choose(na.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        na.choose(na.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_max(self, field, finest_levels = 3):
@@ -70,18 +70,18 @@
         max_val, maxi, mx, my, mz, mg = \
             source.quantities["MaxLocation"]( field, lazy_reader=True)
         max_grid = self.grids[mg]
-        mc = na.unravel_index(maxi, max_grid.ActiveDimensions)
+        mc = np.unravel_index(maxi, max_grid.ActiveDimensions)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s %s", \
               max_val, mx, my, mz, max_grid, max_grid.Level, mc)
         self.parameters["Max%sValue" % (field)] = max_val
         self.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
-        return max_grid, mc, max_val, na.array((mx,my,mz), dtype='float64')
+        return max_grid, mc, max_val, np.array((mx,my,mz), dtype='float64')
 
     def find_min(self, field):
         """
         Returns (value, center) of location of minimum for a given field
         """
-        gI = na.where(self.grid_levels >= 0) # Slow but pedantic
+        gI = np.where(self.grid_levels >= 0) # Slow but pedantic
         minVal = 1e100
         for grid in self.grids[gI[0]]:
             mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
@@ -90,7 +90,7 @@
                 minCoord = coord
                 minVal = val
                 minGrid = grid
-        mc = na.array(minCoord)
+        mc = np.array(minCoord)
         pos=minGrid.get_position(mc)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
               minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
@@ -103,11 +103,11 @@
         """
         Returns the (objects, indices) of grids containing an (x,y,z) point
         """
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         for i in xrange(len(coord)):
-            na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-            na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-        ind = na.where(mask == 1)
+            np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+            np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_field_value_at_point(self, fields, coord):
@@ -134,7 +134,7 @@
         # Get the most-refined grid at this coordinate.
         this = self.find_point(coord)[0][-1]
         cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
-        mark = na.zeros(3).astype('int')
+        mark = np.zeros(3).astype('int')
         # Find the index for the cell containing this point.
         for dim in xrange(len(coord)):
             mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
@@ -151,15 +151,15 @@
         *axis*
         """
         # Let's figure out which grids are on the slice
-        mask=na.ones(self.num_grids)
+        mask=np.ones(self.num_grids)
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the edges, we win!
-        #ind = na.where( na.logical_and(self.grid_right_edge[:,axis] > coord, \
+        #ind = np.where( np.logical_and(self.grid_right_edge[:,axis] > coord, \
                                        #self.grid_left_edge[:,axis] < coord))
-        na.choose(na.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
-        na.choose(na.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
-        ind = na.where(mask == 1)
+        np.choose(np.greater(self.grid_right_edge[:,axis],coord),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,axis],coord),(mask,0),mask)
+        ind = np.where(mask == 1)
         return self.grids[ind], ind
 
     def find_sphere_grids(self, center, radius):
@@ -167,29 +167,29 @@
         Returns objects, indices of grids within a sphere
         """
         centers = (self.grid_right_edge + self.grid_left_edge)/2.0
-        long_axis = na.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
-        t = na.abs(centers - center)
+        long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1)
+        t = np.abs(centers - center)
         DW = self.parameter_file.domain_right_edge \
            - self.parameter_file.domain_left_edge
-        na.minimum(t, na.abs(DW-t), t)
-        dist = na.sqrt(na.sum((t**2.0), axis=1))
-        gridI = na.where(dist < (radius + long_axis))
+        np.minimum(t, np.abs(DW-t), t)
+        dist = np.sqrt(np.sum((t**2.0), axis=1))
+        gridI = np.where(dist < (radius + long_axis))
         return self.grids[gridI], gridI
 
     def get_box_grids(self, left_edge, right_edge):
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = na.where((na.all(self.grid_right_edge > left_edge, axis=1)
-                         & na.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
+                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -203,26 +203,26 @@
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_box_grids_below_level(self, left_edge, right_edge, level,
                                   min_level = 0):
         # We discard grids if they are ABOVE the level
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
                             self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level,
                                            min_level = 0):
-        mask = na.zeros(self.grids.shape, dtype='bool')
+        mask = np.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
         dw = dr - dl
         left_dist = left_edge - dl
         db = right_edge - left_edge
@@ -237,5 +237,5 @@
                     g, gi = self.get_box_grids_below_level(nle, nre,
                                             level, min_level)
                     mask[gi] = True
-        return self.grids[mask], na.where(mask)
+        return self.grids[mask], np.where(mask)
 

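An aside for readers following the rename: the point-location logic above is the np.choose() form of a simple box test, and is equivalent to the commented-out np.where() variant in the slice hunk. A minimal standalone sketch, using toy edge arrays with illustrative names:

    import numpy as np

    # Toy stand-ins for the hierarchy's edge arrays; names are illustrative.
    grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = np.array([0.25, 0.25, 0.25])

    # A grid contains the point iff, on every axis, its left edge is not
    # beyond the point and its right edge is beyond it -- the same test the
    # np.choose() calls build up one axis at a time.
    mask = np.all((grid_left_edge <= coord) & (coord < grid_right_edge), axis=1)
    ind = np.where(mask)
    print(ind)  # (array([0]),) -- only the first grid contains the point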

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -86,7 +86,7 @@
         for field in fields:
             f = self.pf.field_info[field]
             to_add = f.get_dependencies(pf = self.pf).requested
-            to_add = list(na.unique(to_add))
+            to_add = list(np.unique(to_add))
             if len(to_add) != 1: raise KeyError
             fields_to_read += to_add
             if f._particle_convert_function is None:
@@ -95,9 +95,9 @@
                 func = f.particle_convert
             func = particle_converter(func)
             conv_factors.append(
-              na.fromiter((func(g) for g in grid_list),
+              np.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
-        conv_factors = na.array(conv_factors).transpose()
+        conv_factors = np.array(conv_factors).transpose()
         self.conv_factors = conv_factors
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
@@ -115,9 +115,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64') 
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64') 
-        args = (na.array(self.left_edge), na.array(self.right_edge), 
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64') 
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64') 
+        args = (np.array(self.left_edge), np.array(self.right_edge), 
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
@@ -140,9 +140,9 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
-        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
-        return (1, (na.array(self.center, dtype='float64'), self.radius,
+        DLE = np.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = np.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
@@ -156,8 +156,8 @@
         ParticleIOHandler.__init__(self, pf, source)
     
     def _get_args(self):
-        args = (na.array(self.center, dtype='float64'),
-                na.array(self.normal, dtype='float64'),
+        args = (np.array(self.center, dtype='float64'),
+                np.array(self.normal, dtype='float64'),
                 self.radius, self.height)
         return (2, args)
         

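The conversion-factor gathering in the hunk above leans on np.fromiter with an explicit count, which fills a preallocated array straight from a generator. A small sketch of that pattern; the per-grid converter here is a made-up stand-in:

    import numpy as np

    grids = [1.0, 2.0, 4.0]          # stand-ins for grid objects
    func = lambda g: 10.0 * g        # stand-in for the wrapped particle converter

    # Preallocates len(grids) float64 slots and fills them from the
    # generator, avoiding an intermediate Python list.
    conv = np.fromiter((func(g) for g in grids),
                       count=len(grids), dtype='float64')
    print(conv)                      # -> [ 10.  20.  40.]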

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ b/yt/data_objects/particle_trajectories.py
@@ -25,7 +25,7 @@
 from yt.utilities.lib import sample_field_at_positions
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import h5py
 
 class ParticleTrajectoryCollection(object) :
@@ -112,16 +112,16 @@
         for pf in self.pfs :
             dd = pf.h.all_data()
             newtags = dd["particle_index"].astype("int")
-            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
                 print "Not all requested particle ids contained in this file!"
                 raise IndexError
-            mask = na.in1d(newtags, indices, assume_unique=True)
-            sorts = na.argsort(newtags[mask])
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
             self.masks.append(mask)            
             self.sorts.append(sorts)
             self.times.append(pf.current_time)
 
-        self.times = na.array(self.times)
+        self.times = np.array(self.times)
 
         # Set up the derived field list and the particle field list
         # so that if the requested field is a particle field, we'll
@@ -226,7 +226,7 @@
         
         if not self.field_data.has_key(field):
             
-            particles = na.empty((0))
+            particles = np.empty((0))
 
             step = int(0)
                 
@@ -238,13 +238,13 @@
 
                     dd = pf.h.all_data()
                     pfield = dd[field][mask]
-                    particles = na.append(particles, pfield[sort])
+                    particles = np.append(particles, pfield[sort])
 
                 else :
 
                     # This is hard... must loop over grids
 
-                    pfield = na.zeros((self.num_indices))
+                    pfield = np.zeros((self.num_indices))
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
@@ -258,7 +258,7 @@
                                                             grid.RightEdge,
                                                             x, y, z)
 
-                    particles = na.append(particles, pfield)
+                    particles = np.append(particles, pfield)
 
                 step += 1
                 
@@ -294,9 +294,9 @@
         >>> pl.savefig("orbit")
         """
         
-        mask = na.in1d(self.indices, (index,), assume_unique=True)
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
 
-        if not na.any(mask) :
+        if not np.any(mask) :
             print "The particle index %d is not in the list!" % (index)
             raise IndexError
 
@@ -376,7 +376,7 @@
 
         fields = [field for field in sorted(self.field_data.keys())]
         
-        fid.create_dataset("particle_indices", dtype=na.int32,
+        fid.create_dataset("particle_indices", dtype=np.int32,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         

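The mask/sort bookkeeping above is what keeps each particle in a fixed row across outputs: in1d() selects the requested ids from one file's tags, and argsort() reorders them consistently. A toy version, with invented array values and assuming the requested ids are in ascending order:

    import numpy as np

    indices = np.array([3, 7, 11])            # particle ids to follow (ascending)
    newtags = np.array([11, 5, 3, 7, 2])      # ids as one output stores them

    mask  = np.in1d(newtags, indices, assume_unique=True)  # keep wanted particles
    sorts = np.argsort(newtags[mask])                      # reorder [11, 3, 7] ...
    print(newtags[mask][sorts])                            # ... to [ 3  7 11]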

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -115,13 +115,13 @@
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
         #pbar.finish()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = np.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -131,7 +131,7 @@
         for key in self.__data:
             my_mean[key] = self._get_empty_field()
             my_weight[key] = self._get_empty_field()
-        ub = na.where(self.__used)
+        ub = np.where(self.__used)
         for key in self.__data:
             my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
             my_weight[key][ub] = self.__weight_data[key][ub]
@@ -151,7 +151,7 @@
                                          accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
-                q[u] = na.sqrt(q[u] / w[u])
+                q[u] = np.sqrt(q[u] / w[u])
             self[field] = f
             self["%s_std" % field] = q
         self["UsedBins"] = u
@@ -202,7 +202,7 @@
                 else:
                     pointI = self._data_source._get_point_indices(source)
             data.append(source[field][pointI].ravel().astype('float64'))
-        return na.concatenate(data, axis=0)
+        return np.concatenate(data, axis=0)
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -235,10 +235,10 @@
 
         # Get our bins
         if log_space:
-            func = na.logspace
-            lower_bound, upper_bound = na.log10(lower_bound), na.log10(upper_bound)
+            func = np.logspace
+            lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
-            func = na.linspace
+            func = np.linspace
 
         # These are the bin *edges*
         self._bins = func(lower_bound, upper_bound, n_bins + 1)
@@ -253,7 +253,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros(self[self.bin_field].size, dtype='float64')
+        return np.zeros(self[self.bin_field].size, dtype='float64')
 
     @preserve_source_parameters
     def _bin_field(self, source, field, weight, accumulation,
@@ -263,7 +263,7 @@
         # (i.e., lazy_reader)
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -282,7 +282,7 @@
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
-            binned_field = na.add.accumulate(binned_field)
+            binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -293,7 +293,7 @@
             raise EmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
-            mi = na.ones_like(source_data).astype('bool')
+            mi = np.ones_like(source_data).astype('bool')
         else:
             mi = ((source_data > self._bins.min())
                &  (source_data < self._bins.max()))
@@ -301,9 +301,9 @@
         if sd.size == 0:
             raise EmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
-        bin_indices = na.digitize(sd, self._bins)
+        bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
-            bin_indices = na.clip(bin_indices, 0, self.n_bins - 1)
+            bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
           
@@ -319,7 +319,7 @@
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
+            if self._x_log: x=np.log10(x)
             x = 0.5*(x[:-1] + x[1:])
             if self._x_log: x=10**x
         else:
@@ -337,11 +337,11 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = na.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
+            field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -409,18 +409,18 @@
         self.x_n_bins = x_n_bins
         self.y_n_bins = y_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])):
             mylog.error("Your min/max values for x, y have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -428,7 +428,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size), dtype='float64')
 
     @preserve_source_parameters
@@ -436,7 +436,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -456,9 +456,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -470,9 +470,9 @@
             raise EmptyProfileData()
 
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
-            mi = na.where( (source_data_x > self._x_bins.min())
+            mi = np.where( (source_data_x > self._x_bins.min())
                            & (source_data_x < self._x_bins.max())
                            & (source_data_y > self._y_bins.min())
                            & (source_data_y < self._y_bins.max()))
@@ -481,11 +481,11 @@
         if sd_x.size == 0 or sd_y.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y)
@@ -507,8 +507,8 @@
             x = x[1:]
             y = y[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             if self._x_log: x=10**x
@@ -531,7 +531,7 @@
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
-        x,y = na.meshgrid(x,y)
+        x,y = np.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
             field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
@@ -540,7 +540,7 @@
             field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
-        field_data = na.array(field_data)
+        field_data = np.array(field_data)
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -579,7 +579,7 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return na.log10(upper), na.log10(lower)
+    if logit: return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -599,7 +599,7 @@
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape, dtype='float64')
+        else: weight_data = np.ones(source_data.shape, dtype='float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -617,9 +617,9 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
         return binned_field, weight_field, used_field.astype('bool')
 
         
@@ -656,24 +656,24 @@
         self.y_n_bins = y_n_bins
         self.z_n_bins = z_n_bins
 
-        func = {True:na.logspace, False:na.linspace}[x_log]
+        func = {True:np.logspace, False:np.linspace}[x_log]
         bounds = fix_bounds(x_lower_bound, x_upper_bound, x_log)
         self._x_bins = func(bounds[0], bounds[1], x_n_bins + 1)
         self[x_bin_field] = self._x_bins
 
-        func = {True:na.logspace, False:na.linspace}[y_log]
+        func = {True:np.logspace, False:np.linspace}[y_log]
         bounds = fix_bounds(y_lower_bound, y_upper_bound, y_log)
         self._y_bins = func(bounds[0], bounds[1], y_n_bins + 1)
         self[y_bin_field] = self._y_bins
 
-        func = {True:na.logspace, False:na.linspace}[z_log]
+        func = {True:np.logspace, False:np.linspace}[z_log]
         bounds = fix_bounds(z_lower_bound, z_upper_bound, z_log)
         self._z_bins = func(bounds[0], bounds[1], z_n_bins + 1)
         self[z_bin_field] = self._z_bins
 
-        if na.any(na.isnan(self[x_bin_field])) \
-            or na.any(na.isnan(self[y_bin_field])) \
-            or na.any(na.isnan(self[z_bin_field])):
+        if np.any(np.isnan(self[x_bin_field])) \
+            or np.any(np.isnan(self[y_bin_field])) \
+            or np.any(np.isnan(self[z_bin_field])):
             mylog.error("Your min/max values for x, y or z have given me a nan.")
             mylog.error("Usually this means you are asking for log, with a zero bound.")
             raise ValueError
@@ -681,7 +681,7 @@
             self._args = self._get_bins(data_source)
 
     def _get_empty_field(self):
-        return na.zeros((self[self.x_bin_field].size,
+        return np.zeros((self[self.x_bin_field].size,
                          self[self.y_bin_field].size,
                          self[self.z_bin_field].size), dtype='float64')
 
@@ -689,9 +689,9 @@
     def _bin_field(self, source, field, weight, accumulation,
                    args, check_cut=False):
         source_data = self._get_field(source, field, check_cut)
-        weight_data = na.ones(source_data.shape).astype('float64')
+        weight_data = np.ones(source_data.shape).astype('float64')
         if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = na.ones(source_data.shape).astype('float64')
+        else: weight_data = np.ones(source_data.shape).astype('float64')
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
@@ -711,11 +711,11 @@
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
             if accumulation[0]:
-                binned_field = na.add.accumulate(binned_field, axis=0)
+                binned_field = np.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
-                binned_field = na.add.accumulate(binned_field, axis=1)
+                binned_field = np.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
-                binned_field = na.add.accumulate(binned_field, axis=2)
+                binned_field = np.add.accumulate(binned_field, axis=2)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
 
@@ -727,7 +727,7 @@
         if source_data_x.size == 0:
             raise EmptyProfileData()
         if self.end_collect:
-            mi = na.arange(source_data_x.size)
+            mi = np.arange(source_data_x.size)
         else:
             mi = ( (source_data_x > self._x_bins.min())
                  & (source_data_x < self._x_bins.max())
@@ -741,13 +741,13 @@
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
             raise EmptyProfileData()
 
-        bin_indices_x = na.digitize(sd_x, self._x_bins) - 1
-        bin_indices_y = na.digitize(sd_y, self._y_bins) - 1
-        bin_indices_z = na.digitize(sd_z, self._z_bins) - 1
+        bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
+        bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
+        bin_indices_z = np.digitize(sd_z, self._z_bins) - 1
         if self.end_collect:
-            bin_indices_x = na.minimum(na.maximum(1, bin_indices_x), self.x_n_bins) - 1
-            bin_indices_y = na.minimum(na.maximum(1, bin_indices_y), self.y_n_bins) - 1
-            bin_indices_z = na.minimum(na.maximum(1, bin_indices_z), self.z_n_bins) - 1
+            bin_indices_x = np.minimum(np.maximum(1, bin_indices_x), self.x_n_bins) - 1
+            bin_indices_y = np.minimum(np.maximum(1, bin_indices_y), self.y_n_bins) - 1
+            bin_indices_z = np.minimum(np.maximum(1, bin_indices_z), self.z_n_bins) - 1
 
         # Now we set up our inverse bin indices
         return (mi, bin_indices_x, bin_indices_y, bin_indices_z)
@@ -772,9 +772,9 @@
             y = y[1:]
             z = z[1:]
         elif bin_style is 'center':
-            if self._x_log: x=na.log10(x)
-            if self._y_log: y=na.log10(y)
-            if self._z_log: z=na.log10(z)
+            if self._x_log: x=np.log10(x)
+            if self._y_log: y=np.log10(y)
+            if self._z_log: z=np.log10(z)
             x = 0.5*(x[:-1] + x[1:])
             y = 0.5*(y[:-1] + y[1:])
             z = 0.5*(z[:-1] + z[1:])
@@ -853,7 +853,7 @@
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
-        values = na.array(values).transpose()
+        values = np.array(values).transpose()
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
 

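On the binning changes above: the end_collect branches decide what happens to values outside the bin edges. digitize() marks strays with 0 or len(bins), and the minimum/maximum clamp (the 2D/3D form; the 1D profile uses np.clip) folds them into the first and last bins instead of dropping them. A short sketch with invented data:

    import numpy as np

    bins = np.linspace(0.0, 1.0, 5)           # 4 bins -> 5 edges
    data = np.array([-0.2, 0.1, 0.4, 0.97, 1.5])

    idx = np.digitize(data, bins)             # 0 and len(bins) mark out-of-range
    n_bins = len(bins) - 1
    clamped = np.minimum(np.maximum(1, idx), n_bins) - 1
    print(clamped)                            # [0 0 1 3 3] -- strays kept at the ends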

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -26,7 +26,7 @@
 """
 
 import types
-import numpy as na
+import numpy as np
 import inspect
 import copy
 
@@ -61,66 +61,66 @@
 
 def _dx(field, data):
     return data.dds[0]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
 add_field('dx', function=_dx, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dy(field, data):
     return data.dds[1]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
 add_field('dy', function=_dy, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _dz(field, data):
     return data.dds[2]
-    return na.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
+    return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
 add_field('dz', function=_dz,
           display_field=False, validators=[ValidateSpatial(0)])
 
 def _coordX(field, data):
     dim = data.ActiveDimensions[0]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[0])[:,None,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[0])[:,None,None]
             +0.5) * data['dx'] + data.LeftEdge[0]
 add_field('x', function=_coordX, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordY(field, data):
     dim = data.ActiveDimensions[1]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[1])[None,:,None]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[1])[None,:,None]
             +0.5) * data['dy'] + data.LeftEdge[1]
 add_field('y', function=_coordY, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _coordZ(field, data):
     dim = data.ActiveDimensions[2]
-    return (na.ones(data.ActiveDimensions, dtype='float64')
-                   * na.arange(data.ActiveDimensions[2])[None,None,:]
+    return (np.ones(data.ActiveDimensions, dtype='float64')
+                   * np.arange(data.ActiveDimensions[2])[None,None,:]
             +0.5) * data['dz'] + data.LeftEdge[2]
 add_field('z', function=_coordZ, display_field=False,
           validators=[ValidateSpatial(0)])
 
 def _GridLevel(field, data):
-    return na.ones(data.ActiveDimensions)*(data.Level)
+    return np.ones(data.ActiveDimensions)*(data.Level)
 add_field("GridLevel", function=_GridLevel,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
+    return np.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Ones"].shape,
+    return np.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
 def _Ones(field, data):
-    return na.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           validators=[ValidateSpatial(0)],
           projection_conversion="unitary",
@@ -130,7 +130,7 @@
 
 def _SoundSpeed(field, data):
     if data.pf["EOSType"] == 1:
-        return na.ones(data["Density"].shape, dtype='float64') * \
+        return np.ones(data["Density"].shape, dtype='float64') * \
                 data.pf["EOSSoundSpeed"]
     return ( data.pf["Gamma"]*data["Pressure"] / \
              data["Density"] )**(1.0/2.0)
@@ -139,7 +139,7 @@
 
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
-    return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
+    return np.abs(data["RadialVelocity"]) / data["SoundSpeed"]
 add_field("RadialMachNumber", function=_RadialMachNumber)
 
 def _MachNumber(field, data):
@@ -157,7 +157,7 @@
     t3 = data['dz'] / (
         data["SoundSpeed"] + \
         abs(data["z-velocity"]))
-    return na.minimum(na.minimum(t1,t2),t3)
+    return np.minimum(np.minimum(t1,t2),t3)
 def _convertCourantTimeStep(data):
     # SoundSpeed and z-velocity are in cm/s, dx is in code
     return data.convert("cm")
@@ -169,7 +169,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["particle_velocity_x"]-bulk_velocity[0])**2.0 + \
              (data["particle_velocity_y"]-bulk_velocity[1])**2.0 + \
              (data["particle_velocity_z"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -181,7 +181,7 @@
     """M{|v|}"""
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
              (data["y-velocity"]-bulk_velocity[1])**2.0 + \
              (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
@@ -189,13 +189,13 @@
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _TangentialOverVelocityMagnitude(field, data):
-    return na.abs(data["TangentialVelocity"])/na.abs(data["VelocityMagnitude"])
+    return np.abs(data["TangentialVelocity"])/np.abs(data["VelocityMagnitude"])
 add_field("TangentialOverVelocityMagnitude",
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
 def _TangentialVelocity(field, data):
-    return na.sqrt(data["VelocityMagnitude"]**2.0
+    return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
 add_field("TangentialVelocity", 
           function=_TangentialVelocity,
@@ -223,14 +223,14 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
     ## The spherical coordinates radius is simply the magnitude of the
     ## coords vector.
 
-    return na.sqrt(na.sum(coords**2,axis=-1))
+    return np.sqrt(np.sum(coords**2,axis=-1))
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,7 +245,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -254,11 +254,11 @@
     ## vector.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JdotCoords = na.sum(J*coords,axis=-1)
+    JdotCoords = np.sum(J*coords,axis=-1)
     
-    return na.arccos( JdotCoords / na.sqrt(na.sum(coords**2,axis=-1)) )
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,7 +269,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
     
@@ -282,18 +282,18 @@
     ## The angle is then given by the arctan of the ratio of the
     ## yprime-component and the xprime-component of the coords vector.
 
-    xprime = na.cross([0.0,1.0,0.0],normal)
-    if na.sum(xprime) == 0: xprime = na.array([0.0, 0.0, 1.0])
-    yprime = na.cross(normal,xprime)
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = na.tile(xprime,tile_shape)
-    Jy = na.tile(yprime,tile_shape)
+    Jx = np.tile(xprime,tile_shape)
+    Jy = np.tile(yprime,tile_shape)
     
-    Px = na.sum(Jx*coords,axis=-1)
-    Py = na.sum(Jy*coords,axis=-1)
+    Px = np.sum(Jx*coords,axis=-1)
+    Py = np.sum(Jy*coords,axis=-1)
     
-    return na.arctan2(Py,Px)
+    return np.arctan2(Py,Px)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -305,7 +305,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -313,10 +313,10 @@
     ## gives a vector of magnitude equal to the cylindrical radius.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    JcrossCoords = na.cross(J,coords)
-    return na.sqrt(na.sum(JcrossCoords**2,axis=-1))
+    JcrossCoords = np.cross(J,coords)
+    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -331,7 +331,7 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = na.array([data['x'] - center[0],
+    coords = np.array([data['x'] - center[0],
                        data['y'] - center[1],
                        data['z'] - center[2]]).transpose()
 
@@ -339,9 +339,9 @@
     ## the cylindrical height.
     
     tile_shape = list(coords.shape)[:-1] + [1]
-    J = na.tile(normal,tile_shape)
+    J = np.tile(normal,tile_shape)
 
-    return na.sum(J*coords,axis=-1)  
+    return np.sum(J*coords,axis=-1)  
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -399,7 +399,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*np.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -502,7 +502,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*data['dz']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']*data['dz']
     return data["dx"]*data["dy"]*data["dz"]
@@ -520,7 +520,7 @@
           convert_function=_ConvertCellVolumeCGS)
 
 def _ChandraEmissivity(field, data):
-    logT0 = na.log10(data["Temperature"]) - 7
+    logT0 = np.log10(data["Temperature"]) - 7
     return ((data["NumberDensity"].astype('float64')**2.0) \
             *(10**(-0.0103*logT0**8 \
                    +0.0417*logT0**7 \
@@ -579,15 +579,15 @@
 
 def _AveragedDensity(field, data):
     nx, ny, nz = data["Density"].shape
-    new_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    weight_field = na.zeros((nx-2,ny-2,nz-2), dtype='float64')
-    i_i, j_i, k_i = na.mgrid[0:3,0:3,0:3]
+    new_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    weight_field = np.zeros((nx-2,ny-2,nz-2), dtype='float64')
+    i_i, j_i, k_i = np.mgrid[0:3,0:3,0:3]
     for i,j,k in zip(i_i.ravel(),j_i.ravel(),k_i.ravel()):
         sl = [slice(i,nx-(2-i)),slice(j,ny-(2-j)),slice(k,nz-(2-k))]
         new_field += data["Density"][sl] * data["CellMass"][sl]
         weight_field += data["CellMass"][sl]
     # Now some fancy footwork
-    new_field2 = na.zeros((nx,ny,nz))
+    new_field2 = np.zeros((nx,ny,nz))
     new_field2[1:-1,1:-1,1:-1] = new_field/weight_field
     return new_field2
 add_field("AveragedDensity",
@@ -615,7 +615,7 @@
         ds = div_fac * data['dz'].flat[0]
         f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
         f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
 def _convertDivV(data):
@@ -627,12 +627,12 @@
           convert_function=_convertDivV)
 
 def _AbsDivV(field, data):
-    return na.abs(data['DivV'])
+    return np.abs(data['DivV'])
 add_field("AbsDivV", function=_AbsDivV,
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -na.ones_like(data["Ones"])
+    return -np.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -642,7 +642,7 @@
 def obtain_velocities(data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["x-velocity"] - bv[0]
     yv = data["y-velocity"] - bv[1]
     zv = data["z-velocity"] - bv[2]
@@ -694,18 +694,18 @@
     """
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     xv = data["particle_velocity_x"] - bv[0]
     yv = data["particle_velocity_y"] - bv[1]
     zv = data["particle_velocity_z"] - bv[2]
     center = data.get_field_parameter('center')
-    coords = na.array([data['particle_position_x'],
+    coords = np.array([data['particle_position_x'],
                        data['particle_position_y'],
                        data['particle_position_z']], dtype='float64')
     new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
+    r_vec = coords - np.reshape(center,new_shape)
+    v_vec = np.array([xv,yv,zv], dtype='float64')
+    return np.cross(r_vec, v_vec, axis=0)
 #add_field("ParticleSpecificAngularMomentum",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
@@ -720,7 +720,7 @@
 def _ParticleSpecificAngularMomentumX(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     y = data["particle_position_y"] - center[1]
     z = data["particle_position_z"] - center[2]
@@ -730,7 +730,7 @@
 def _ParticleSpecificAngularMomentumY(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     z = data["particle_position_z"] - center[2]
@@ -740,7 +740,7 @@
 def _ParticleSpecificAngularMomentumZ(field, data):
     if data.has_field_parameter("bulk_velocity"):
         bv = data.get_field_parameter("bulk_velocity")
-    else: bv = na.zeros(3, dtype='float64')
+    else: bv = np.zeros(3, dtype='float64')
     center = data.get_field_parameter('center')
     x = data["particle_position_x"] - center[0]
     y = data["particle_position_y"] - center[1]
@@ -788,20 +788,20 @@
 def _ParticleRadius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["particle_position_x"].shape, dtype='float64')
+    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data["particle_position_%s" % ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data["particle_position_%s" % ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _Radius(field, data):
     center = data.get_field_parameter("center")
     DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = na.zeros(data["x"].shape, dtype='float64')
+    radius = np.zeros(data["x"].shape, dtype='float64')
     for i, ax in enumerate('xyz'):
-        r = na.abs(data[ax] - center[i])
-        radius += na.minimum(r, na.abs(DW[i]-r))**2.0
-    na.sqrt(radius, radius)
+        r = np.abs(data[ax] - center[i])
+        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
+    np.sqrt(radius, radius)
     return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
@@ -886,16 +886,16 @@
     center = data.get_field_parameter("center")
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
+        bulk_velocity = np.zeros(3)
     new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
                 + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
                 + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
                 )/data["RadiusCode"]
-    if na.any(na.isnan(new_field)): # to fix center = point
-        new_field[na.isnan(new_field)] = 0.0
+    if np.any(np.isnan(new_field)): # to fix center = point
+        new_field[np.isnan(new_field)] = 0.0
     return new_field
 def _RadialVelocityABS(field, data):
-    return na.abs(_RadialVelocity(field, data))
+    return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
     return 1e-5
 add_field("RadialVelocity", function=_RadialVelocity,
@@ -916,10 +916,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(x_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(x_vec, v_vec)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -929,10 +929,10 @@
                            for ax in 'xyz']
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
-        bulk_velocity = na.zeros(3)
-    v_vec = na.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,na.newaxis]
-    return na.dot(y_vec, v_vec)
+        bulk_velocity = np.zeros(3)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
+                - bulk_velocity[...,np.newaxis]
+    return np.dot(y_vec, v_vec)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -955,16 +955,16 @@
 def _convertDensity(data):
     return data.convert("Density")
 def _pdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
+    CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                 data["particle_position_y"].astype(np.float64),
+                 data["particle_position_z"].astype(np.float64),
+                 data["particle_mass"].astype(np.float32),
+                 np.int64(data.NumberOfParticles),
+                 blank, np.array(data.LeftEdge).astype(np.float64),
+                 np.array(data.ActiveDimensions).astype(np.int32),
+                 np.float64(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
@@ -993,7 +993,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape)
+    new_field = np.zeros(data["x-velocity"].shape)
     dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
              data["z-velocity"][1:-1,sl_left,1:-1]) \
              / (div_fac*data["dy"].flat[0])
@@ -1018,7 +1018,7 @@
              / (div_fac*data["dy"].flat[0])
     new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
     del dvydx, dvxdy
-    new_field = na.abs(new_field)
+    new_field = np.abs(new_field)
     return new_field
 def _convertVorticitySquared(data):
     return data.convert("cm")**-2.0
@@ -1038,7 +1038,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
@@ -1053,7 +1053,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
@@ -1068,7 +1068,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["Pressure"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
@@ -1083,7 +1083,7 @@
               units=r"\rm{dyne}/\rm{cm}^{3}")
 
 def _gradPressureMagnitude(field, data):
-    return na.sqrt(data["gradPressureX"]**2 +
+    return np.sqrt(data["gradPressureX"]**2 +
                    data["gradPressureY"]**2 +
                    data["gradPressureZ"]**2)
 add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
@@ -1100,7 +1100,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dx'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
@@ -1115,7 +1115,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dy'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
@@ -1130,7 +1130,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    new_field = np.zeros(data["Density"].shape, dtype='float64')
     ds = div_fac * data['dz'].flat[0]
     new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
     new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
@@ -1145,7 +1145,7 @@
               units=r"\rm{g}/\rm{cm}^{4}")
 
 def _gradDensityMagnitude(field, data):
-    return na.sqrt(data["gradDensityX"]**2 +
+    return np.sqrt(data["gradDensityX"]**2 +
                    data["gradDensityY"]**2 +
                    data["gradDensityZ"]**2)
 add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
@@ -1171,7 +1171,7 @@
           units=r"\rm{s}^{-1}")
 
 def _BaroclinicVorticityMagnitude(field, data):
-    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+    return np.sqrt(data["BaroclinicVorticityX"]**2 +
                    data["BaroclinicVorticityY"]**2 +
                    data["BaroclinicVorticityZ"]**2)
 add_field("BaroclinicVorticityMagnitude",
@@ -1189,7 +1189,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
                                  data["z-velocity"][1:-1,sl_left,1:-1]) \
                                  / (div_fac*data["dy"].flat[0])
@@ -1207,7 +1207,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["z-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
                                  data["x-velocity"][1:-1,1:-1,sl_left]) \
                                  / (div_fac*data["dz"].flat[0])
@@ -1225,7 +1225,7 @@
         sl_left = slice(None,-2,None)
         sl_right = slice(2,None,None)
         div_fac = 2.0
-    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field = np.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
                                  data["y-velocity"][sl_left,1:-1,1:-1]) \
                                  / (div_fac*data["dx"].flat[0])
@@ -1244,7 +1244,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityMagnitude(field, data):
-    return na.sqrt(data["VorticityX"]**2 +
+    return np.sqrt(data["VorticityX"]**2 +
                    data["VorticityY"]**2 +
                    data["VorticityZ"]**2)
 add_field("VorticityMagnitude", function=_VorticityMagnitude,
@@ -1263,7 +1263,7 @@
     add_field(n, function=eval("_%s" % n),
               validators=[ValidateSpatial(0)])
 def _VorticityStretchingMagnitude(field, data):
-    return na.sqrt(data["VorticityStretchingX"]**2 +
+    return np.sqrt(data["VorticityStretchingX"]**2 +
                    data["VorticityStretchingY"]**2 +
                    data["VorticityStretchingZ"]**2)
 add_field("VorticityStretchingMagnitude", 
@@ -1285,13 +1285,13 @@
                           ["x-velocity", "y-velocity", "z-velocity"])],
               units=r"\rm{s}^{-2}")
 def _VorticityGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityGrowthX"]**2 +
+    result = np.sqrt(data["VorticityGrowthX"]**2 +
                      data["VorticityGrowthY"]**2 +
                      data["VorticityGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1299,7 +1299,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityGrowthX"]**2 +
+    return np.sqrt(data["VorticityGrowthX"]**2 +
                    data["VorticityGrowthY"]**2 +
                    data["VorticityGrowthZ"]**2)
 add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
@@ -1311,7 +1311,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],
@@ -1344,7 +1344,7 @@
               units=r"\rm{s}^{-1}")
 
 def _VorticityRadPressureMagnitude(field, data):
-    return na.sqrt(data["VorticityRadPressureX"]**2 +
+    return np.sqrt(data["VorticityRadPressureX"]**2 +
                    data["VorticityRadPressureY"]**2 +
                    data["VorticityRadPressureZ"]**2)
 add_field("VorticityRadPressureMagnitude",
@@ -1369,13 +1369,13 @@
                        ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
               units=r"\rm{s}^{-1}")
 def _VorticityRPGrowthMagnitude(field, data):
-    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+    result = np.sqrt(data["VorticityRPGrowthX"]**2 +
                      data["VorticityRPGrowthY"]**2 +
                      data["VorticityRPGrowthZ"]**2)
-    dot = na.zeros(result.shape)
+    dot = np.zeros(result.shape)
     for ax in "XYZ":
         dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
-    result = na.sign(dot) * result
+    result = np.sign(dot) * result
     return result
 add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
           validators=[ValidateSpatial(1, 
@@ -1383,7 +1383,7 @@
           units=r"\rm{s}^{-1}",
           take_log=False)
 def _VorticityRPGrowthMagnitudeABS(field, data):
-    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+    return np.sqrt(data["VorticityRPGrowthX"]**2 +
                    data["VorticityRPGrowthY"]**2 +
                    data["VorticityRPGrowthZ"]**2)
 add_field("VorticityRPGrowthMagnitudeABS", 
@@ -1396,7 +1396,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
-    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],


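For context on the hunks above: every vorticity component field is built
from the same second-order centered-difference stencil, and the magnitude
fields then combine three components in quadrature with np.sqrt.  A minimal
standalone sketch of the stencil (hypothetical arrays, not the yt field API):

    import numpy as np

    vz = np.random.random((8, 8, 8))  # stand-in for data["z-velocity"]
    dy = 0.1                          # stand-in for data["dy"].flat[0]

    sl_left = slice(None, -2, None)   # cells at j-1
    sl_right = slice(2, None, None)   # cells at j+1
    div_fac = 2.0

    # d(vz)/dy ~ (vz[:,j+1,:] - vz[:,j-1,:]) / (2*dy) on interior cells.
    dvz_dy = np.zeros(vz.shape, dtype='float64')
    dvz_dy[1:-1,1:-1,1:-1] = (vz[1:-1,sl_right,1:-1] -
                              vz[1:-1,sl_left,1:-1]) / (div_fac * dy)
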
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 import cPickle
@@ -106,7 +106,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -120,10 +120,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -141,7 +141,7 @@
         #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
         
@@ -180,9 +180,9 @@
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_info = np.array(self.pf.level_info)        
         self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
+        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
         self.pf.level_art_child_masks = {}
@@ -192,10 +192,10 @@
         del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
-                        na.zeros(3, dtype='int64'), # left index of PSG
+                        np.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
-                        na.zeros((1,3), dtype='int64'), # left edges of grids
-                        na.zeros((1,6), dtype='int64') # empty
+                        np.zeros((1,3), dtype='int64'), # left edges of grids
+                        np.zeros((1,6), dtype='int64') # empty
                         )
         
         self.proto_grids = [[root_psg],]
@@ -224,8 +224,8 @@
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
-                              na.log10(2))
+            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                              np.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
                                     level + base_level, left_index)
             #print base_level, hilbert_indices.max(),
@@ -234,7 +234,7 @@
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
@@ -260,15 +260,15 @@
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                #for idomain in na.unique(ddfl[:,1]):
+                #for idomain in np.unique(ddfl[:,1]):
                 #dom_ind = ddfl[:,1] == idomain
                 #dleft_index = ddleft_index[dom_ind,:]
                 #dfl = ddfl[dom_ind,:]
                 
                 dleft_index = ddleft_index
                 dfl = ddfl
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                initial_left = np.min(dleft_index, axis=0)
+                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -298,8 +298,8 @@
                 
                 step+=1
                 pbar.update(step)
-            eff_mean = na.mean(psg_eff)
-            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_mean = np.mean(psg_eff)
+            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
             mylog.info("Average subgrid efficiency %02.1f %%",
                         eff_mean*100.0)
@@ -345,14 +345,14 @@
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:],'uint8')
+                child_mask = np.zeros(props[2,:],'uint8')
                 amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*na.array(correction).astype('int64')))
+                    props*np.array(correction).astype('int64')))
                 gi += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         
 
         if self.pf.file_particle_data:
@@ -372,7 +372,7 @@
             pbar.update(1)
             npa,npb=0,0
             npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
+            clspecies = np.concatenate(([0,],lspecies))
             if self.pf.only_particle_type is not None:
                 npb = lspecies[0]
                 if type(self.pf.only_particle_type)==type(5):
@@ -388,13 +388,13 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type         = np.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = np.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
@@ -461,17 +461,17 @@
             init = self.pf.particle_position.shape[0]
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
-            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
             if type(self.pf.grid_particles) == type(5):
                 particle_level = min(self.pf.max_level,self.pf.grid_particles)
             else:
                 particle_level = 2
-            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    np.zeros(len(pos[:,0])).astype('int32')-1,
                     particle_level, #don't grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
@@ -500,10 +500,10 @@
             
 
     def _get_grid_parents(self, grid, LE, RE):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
         return self.grids[mask]
 
     def _populate_grid_objects(self):
@@ -519,7 +519,7 @@
         self.max_level = self.grid_levels.max()
 
     # def _populate_grid_objects(self):
-    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     mask = np.empty(self.grids.size, dtype='int32')
     #     pb = get_pbar("Populating grids", len(self.grids))
     #     for gi,g in enumerate(self.grids):
     #         pb.update(gi)
@@ -609,7 +609,7 @@
         self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
-            self.limit_level = na.inf
+            self.limit_level = np.inf
         else:
             limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
@@ -685,7 +685,7 @@
         wmu = self["wmu"]
         #ng = self.domain_dimensions[0]
         #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + na.sqrt(self.omega_matter))
+        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
         #v0 = r0 / t0
         #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
         #e0 = v0**2.0
@@ -696,7 +696,7 @@
         hubble = self.hubble_constant
         ng = self.domain_dimensions[0]
         self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * na.sqrt(self.omega_matter)  #cm/s
+        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         self.t0 = self.r0/self.v0
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
@@ -730,8 +730,8 @@
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
-        self.domain_left_edge = na.zeros(3, dtype="float64")
-        self.domain_right_edge = na.ones(3, dtype="float64")
+        self.domain_left_edge = np.zeros(3, dtype="float64")
+        self.domain_right_edge = np.ones(3, dtype="float64")
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters = {}
@@ -812,10 +812,10 @@
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
         # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
+        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
         # integrand_arr = integrand(spacings)
-        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
         # self.current_time *= self.hubble_time
         self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
@@ -824,7 +824,7 @@
         
         Om0 = self.parameters['Om0']
         hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * na.sqrt(Om0)
+        dummy = 100.0 * hubble * np.sqrt(Om0)
         ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
         boxh = header_vals['boxh'] 
@@ -836,7 +836,7 @@
         self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
         #velocity velocity units in km/s
         self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                na.sqrt(self.parameters["Om0"])
+                np.sqrt(self.parameters["Om0"])
         #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
         self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
         rho0 = self.parameters["rho0"]
@@ -857,10 +857,10 @@
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = int(na.rint(self.ncell**(1.0/3.0)))
+        est = int(np.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64')*est 
+        self.domain_dimensions = np.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         #_skip_record(f) # iOctCh
@@ -927,8 +927,8 @@
         seek_extras = 137
         fh.seek(seek_extras)
         n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()


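One hazard of the alias rename is visible in the particle hunk above:
np.zeros(np, dtype='uint8') appears to pass a particle-count variable that
is itself named np, which now shadows the numpy module alias (the enzo
hierarchy diff further down renames its equivalent local to npart for
exactly this reason).  A standalone sketch of the failure mode, with a
hypothetical count variable:

    import numpy as np

    def broken_particle_setup():
        np = 100  # local particle count shadows the module alias
        return np.zeros(np, dtype='uint8')  # np is now an int here

    try:
        broken_particle_setup()
    except AttributeError as err:
        print(err)  # 'int' object has no attribute 'zeros'
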
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,7 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as na
+import numpy as np
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -178,7 +178,7 @@
     di = dd==0.0
     #dd[di] = -1.0
     tr = dg/dd
-    #tr[na.isnan(tr)] = 0.0
+    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
     #    import pdb;pdb.set_trace()
     tr /= data.pf.conversion_factors["GasEnergy"]
@@ -186,7 +186,7 @@
     tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
-    #assert na.all(na.isfinite(tr))
+    #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
     x = data.pf.conversion_factors["Temperature"]
@@ -258,9 +258,9 @@
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
     tr  = data["Ones"] #create a grid in the right size
-    if na.sum(idx)>0:
-        tr /= na.prod(tr.shape) #divide by the volume
-        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+    if np.sum(idx)>0:
+        tr /= np.prod(tr.shape) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contained mass
         return tr
     else:
         return tr*0.0


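The particle-mass field above deposits the total mass of the particles
selected by idx uniformly across every cell of the grid.  A minimal
standalone sketch with hypothetical inputs:

    import numpy as np

    ones = np.ones((4, 4, 4))             # stand-in for data["Ones"]
    particle_mass = np.array([1.0, 2.5])  # hypothetical masses in this grid
    idx = np.array([True, True])          # hypothetical selection mask

    tr = ones.copy()
    if np.sum(idx) > 0:
        tr /= np.prod(tr.shape)           # divide by the number of cells
        tr *= np.sum(particle_mass[idx])  # multiply by total contained mass
    else:
        tr *= 0.0
    assert abs(tr.sum() - 3.5) < 1e-12    # the cells sum back to the total
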
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import struct
 
 import os
@@ -93,9 +93,9 @@
         f.seek(self.level_offsets[level])
         ncells = 8*self.level_info[level]
         nvals = ncells * (self.nhydro_vars + 6) # +6: 2 vars, 2 pads, idc, iOctCh
-        arr = na.fromfile(f, dtype='>f', count=nvals)
+        arr = np.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
@@ -108,13 +108,13 @@
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
+        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        na.fromfile(f,dtype='>i',count=2) #throw away the pads
+        np.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
+        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
-        arr = na.concatenate((hvar,var))
+        arr = np.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = np.array(grid.particle_indices)
         if field == 'particle_index':
-            return na.array(idx)
+            return np.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -168,10 +168,10 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2).astype("float64")
-        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
-        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
@@ -198,9 +198,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -242,20 +242,20 @@
     #fortran indices start at 1
     
     #Skip all the oct hierarchy data
-    le     = na.zeros((nLevel,3),dtype='int64')
-    fl     = na.ones((nLevel,6),dtype='int64')
-    iocts  = na.zeros(nLevel+1,dtype='int64')
+    le     = np.zeros((nLevel,3),dtype='int64')
+    fl     = np.ones((nLevel,6),dtype='int64')
+    iocts  = np.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
     while left > 0 :
         this_chunk = min(chunk,left)
         idxb=idxa+this_chunk
-        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        fl[idxa:idxb,1] = np.arange(idxa,idxb)
         #pad byte is last, LL2, then ioct right before it
         iocts[idxa:idxb] = data[:,-3] 
         idxa=idxa+this_chunk
@@ -272,12 +272,12 @@
     #now correct iocts for fortran indices start @ 1
     iocts = iocts-1
 
-    assert na.unique(iocts).shape[0] == nLevel
+    assert np.unique(iocts).shape[0] == nLevel
     
     #ioct tries to access arrays much larger than le & fl
     #just make sure they appear in the right order, skipping
     #the empty space in between
-    idx = na.argsort(iocts)
+    idx = np.argsort(iocts)
     
     #now rearrange le & fl in order of the ioct
     le = le[idx]
@@ -294,7 +294,7 @@
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
     
     
     
@@ -309,9 +309,9 @@
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
@@ -332,8 +332,8 @@
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # +6: 2 vars, 2 pads, idc, iOctCh
-    ioctch = na.zeros(nLevel,dtype='uint8')
-    idc = na.zeros(nLevel,dtype='int32')
+    ioctch = np.zeros(nLevel,dtype='uint8')
+    idc = np.zeros(nLevel,dtype='int32')
     
     chunk = long(1e6)
     left = nLevel
@@ -342,9 +342,9 @@
     while left > 0:
         chunk = min(chunk,left)
         b += chunk
-        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = np.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
-        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
         ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
         #zero in the mask means there is refinement available
@@ -354,12 +354,12 @@
     return idc,ioctch
     
 nchem=8+2
-dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
 def _read_art_child(f, level_child_offsets,level,nLevel,field):
     pos=f.tell()
     f.seek(level_child_offsets[level])
-    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = np.fromfile(f, dtype='>f', count=nLevel * 8)
     arr = arr.reshape((nLevel,16), order="F")
     arr = arr[3:-1,:].astype("float64")
     f.seek(pos)
@@ -372,8 +372,8 @@
 
 def _read_frecord(f,fmt):
     s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/na.dtype(fmt).itemsize
-    ss = na.fromfile(f,fmt,count=count)
+    count = s1/np.dtype(fmt).itemsize
+    ss = np.fromfile(f,fmt,count=count)
     s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     assert s1==s2
     return ss
@@ -406,14 +406,14 @@
 
 #All of these functions are to convert from hydro time var to 
 #proper time
-sqrt = na.sqrt
-sign = na.sign
+sqrt = np.sqrt
+sign = np.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -na.inf
+    last = -np.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while na.abs(f(c)-last) > tol:
+    while np.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -423,9 +423,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    val = np.trapz(integrand_arr,dx=np.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -450,14 +450,14 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #spacings = np.logspace(-5,np.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = na.array(tb)
+    tb = np.array(tb)
     if type(tb) == type(1.1): 
         return a2t(b2a(tb))
     if tb.shape == (): 
@@ -465,14 +465,14 @@
     if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*na.logspace(na.log10(-tb.min()),
-                          na.log10(-tb.max()),n)
+    tbs  = -1.*np.logspace(np.log10(-tb.min()),
+                          np.log10(-tb.max()),n)
     ages = []
     for i,tbi in enumerate(tbs):
         ages += a2t(b2a(tbi)),
         if logger: logger(i)
-    ages = na.array(ages)
-    fb2t = na.interp(tb,tbs,ages)
+    ages = np.array(ages)
+    fb2t = np.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
 


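The quad() helper above stands in for scipy integration with a trapezoidal
rule on log-spaced abscissae, and b2t() then maps code-time values onto ages
with np.interp.  A quick standalone check of the quadrature pattern
(hypothetical integrand):

    import numpy as np

    # Integrate 1/x from 1 to e on a log-spaced grid; the exact answer is 1.
    spacings = np.logspace(np.log10(1.0), np.log10(np.e), int(1e4))
    integrand_arr = 1.0 / spacings
    val = np.trapz(integrand_arr, dx=np.diff(spacings))
    assert abs(val - 1.0) < 1e-4
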
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -109,7 +109,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -174,12 +174,12 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #na.array(map(int, self._global_header_lines[counter].split()))
+        #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         #domain_re.search(self._global_header_lines[counter]).groups()
@@ -187,9 +187,9 @@
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -273,8 +273,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                                        level, gfn, gfo, dims,
@@ -296,7 +296,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
 
         self.field_list += castro_particle_field_names[:]
@@ -311,7 +311,7 @@
 
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = na.fromiter((int(i)
+        grid_info = np.fromiter((int(i)
                                  for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
@@ -347,15 +347,15 @@
         self._dtype = dtype
 
     def _calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
 
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
@@ -367,9 +367,9 @@
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
                                   for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
@@ -384,9 +384,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -405,7 +405,7 @@
             grid._setup_dx()
 
     def _setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -424,10 +424,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -439,7 +439,7 @@
             except:
                 continue
 
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
 
         for field in self.field_list:
@@ -473,11 +473,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -620,9 +620,9 @@
                     else:
                         self.parameters[paramName] = t
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 


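For reference, _calculate_grid_dimensions above converts BoxLib-style
inclusive lo/hi index pairs into per-axis cell counts.  A standalone sketch
with hypothetical header tokens:

    import numpy as np

    start_stop = ("0,0,0", "63,63,63")  # hypothetical lo/hi index strings
    start = np.array([int(i) for i in start_stop[0].split(',')])
    stop = np.array([int(i) for i in start_stop[1].split(',')])
    dimension = stop - start + 1        # inclusive bounds: 64 cells per axis
    assert (dimension == 64).all()
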
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.utilities.lib import \
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
             castro_particle_field_names.index(field),
             len(castro_particle_field_names),
@@ -85,8 +85,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
+            start = np.array(map(int, start.split(',')))
+            stop = np.array(map(int, stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -126,7 +126,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        field = np.fromfile(inFile, count=nElements, dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
      defaultdict
@@ -81,10 +81,10 @@
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
             start_index = iLE / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -97,7 +97,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -137,18 +137,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py                                                                                                             
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
@@ -182,8 +182,8 @@
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
             for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
                 pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
                 grids.append(pg)
@@ -193,9 +193,9 @@
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-#        self.grids = na.array(self.grids, dtype='object')
+#        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -212,7 +212,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
@@ -316,21 +316,21 @@
     def __calc_left_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         fileh.close()
         return LE
 
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
                   
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
-        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
-        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         return R_index - L_index
  
     @classmethod


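The __calc_left_edge/__calc_right_edge helpers above recover physical domain
bounds from Chombo's prob_domain attribute, which stores inclusive lo/hi
cell indices.  A standalone sketch with hypothetical values:

    import numpy as np

    dx0 = 0.25                                  # hypothetical level-0 dx
    prob_domain = np.array([0, 0, 0, 3, 3, 3])  # lo_i..lo_k, hi_i..hi_k
    LE = dx0 * prob_domain[0:3]
    RE = dx0 * (prob_domain[3:] + 1)  # +1 because hi indices are inclusive
    # LE -> [0. 0. 0.], RE -> [1. 1. 1.]
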
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,7 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-import numpy as na
+import numpy as np
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -131,7 +131,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,7 +25,7 @@
 """
 import h5py
 import re
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -108,4 +108,4 @@
                     if ( (grid.LeftEdge < coord).all() and
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)


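One detail worth noting before the enzo hierarchy diff below:
_generate_random_grids samples grids with np.mgrid using an imaginary step,
which, like np.linspace, produces a fixed count of evenly spaced values.
A standalone sketch:

    import numpy as np

    starter = np.random.randint(0, 20)
    # A complex step of 20j asks mgrid for 20 evenly spaced samples,
    # endpoints included, between starter and 99.
    random_sample = np.mgrid[starter:99:20j].astype("int32")
    assert len(random_sample) == 20
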
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import weakref
-import numpy as na
+import numpy as np
 import os
 import stat
 import string
@@ -90,7 +90,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -179,7 +179,7 @@
                 if self.pf.field_info[field].particle_type: continue
                 temp = self.hierarchy.io._read_raw_data_set(self, field)
                 temp = temp.swapaxes(0, 2)
-                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+                cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]
         return cube
 
 class EnzoHierarchy(AMRHierarchy):
@@ -291,7 +291,7 @@
         f = open(self.hierarchy_filename, "rb")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
-        si, ei, LE, RE, fn, np = [], [], [], [], [], []
+        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
@@ -304,29 +304,29 @@
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
             fn.append(["-1"])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
-            np.append(int(_next_token_line("NumberOfParticles", f)[0]))
-            if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
+            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
+            if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
             for line in f:
                 if len(line) < 2: break
                 if line.startswith("Pointer:"):
                     vv = patt.findall(line)[0]
                     self.__pointer_handler(vv)
         pbar.finish()
-        self._fill_arrays(ei, si, LE, RE, np)
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        self._fill_arrays(ei, si, LE, RE, npart)
+        temp_grids = np.empty(self.num_grids, dtype='object')
         temp_grids[:] = self.grids
         self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= na.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
 
     def __pointer_handler(self, m):
         sgi = int(m[2])-1
@@ -379,7 +379,7 @@
             if Pid > -1:
                 grids[Pid-1]._children_ids.append(grid.id)
             self.filenames.append(pmap[P])
-        self.grids = na.array(grids, dtype='object')
+        self.grids = np.array(grids, dtype='object')
         f.close()
         mylog.info("Finished with binary hierarchy reading")
         return True
@@ -408,9 +408,9 @@
             procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
-        parents = na.array(parents, dtype='int64')
-        procs = na.array(procs, dtype='int64')
-        levels = na.array(levels, dtype='int64')
+        parents = np.array(parents, dtype='int64')
+        procs = np.array(procs, dtype='int64')
+        levels = np.array(levels, dtype='int64')
         f.create_dataset("/ParentIDs", data=parents)
         f.create_dataset("/Processor", data=procs)
         f.create_dataset("/Level", data=levels)
@@ -425,7 +425,7 @@
         mylog.info("Rebuilding grids on level %s", level)
         cmask = (self.grid_levels.flat == (level + 1))
         cmsum = cmask.sum()
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         for grid in self.select_grids(level):
             mask[:] = 0
             LE = self.grid_left_edge[grid.id - grid._id_offset]
@@ -477,20 +477,20 @@
 
     def _generate_random_grids(self):
         if self.num_grids > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(self.grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
             # We also add in a bit to make sure that some of the grids have
             # particles
             gwp = self.grid_particle_count > 0
-            if na.any(gwp) and not na.any(gwp[(random_sample,)]):
+            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                 # We just add one grid.  This is not terribly efficient.
-                first_grid = na.where(gwp)[0][0]
+                first_grid = np.where(gwp)[0][0]
                 random_sample.resize((21,))
                 random_sample[-1] = first_grid
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -518,7 +518,7 @@
         pstore = []
         for level in range(self.max_level, -1, -1):
             for grid in self.select_grids(level):
-                index = na.where(grid['particle_type'] == ptype)[0]
+                index = np.where(grid['particle_type'] == ptype)[0]
                 total += len(index)
                 pstore.append(index)
                 if total >= max_num: break
@@ -527,7 +527,7 @@
         if total > 0:
             result = {}
             for p in pfields:
-                result[p] = na.zeros(total, 'float64')
+                result[p] = np.zeros(total, 'float64')
             # Now we retrieve data for each field
             ig = count = 0
             for level in range(self.max_level, -1, -1):
@@ -590,7 +590,7 @@
                 grids[pid-1]._children_ids.append(grids[-1].id)
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for i, grid in enumerate(grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -601,7 +601,7 @@
 
     def _initialize_grid_arrays(self):
         EnzoHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def _copy_hierarchy_structure(self):
         # Dimensions are important!
@@ -638,35 +638,35 @@
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
-            starter = na.random.randint(0, 20)
-            random_sample = na.mgrid[starter:len(my_grids)-1:20j].astype("int32")
+            starter = np.random.randint(0, 20)
+            random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype("int32")
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
+            random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype("int32")
         return my_grids[(random_sample,)]
 
 class EnzoHierarchy1D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,1:] = 0.0
         self.grid_right_edge[:,1:] = 1.0
         self.grid_dimensions[:,1:] = 1
 
 class EnzoHierarchy2D(EnzoHierarchy):
 
-    def _fill_arrays(self, ei, si, LE, RE, np):
+    def _fill_arrays(self, ei, si, LE, RE, npart):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= na.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE
-        self.grid_particle_count.flat[:] = np
+        self.grid_particle_count.flat[:] = npart
         self.grid_left_edge[:,2] = 0.0
         self.grid_right_edge[:,2] = 1.0
         self.grid_dimensions[:,2] = 1
@@ -702,39 +702,22 @@
         StaticOutput.__init__(self, filename, data_style, file_style=file_style)
         if "InitialTime" not in self.parameters:
             self.current_time = 0.0
-        rp = os.path.join(self.directory, "rates.out")
-        if os.path.exists(rp):
-            try:
-                self.rates = EnzoTable(rp, rates_out_key)
-            except:
-                pass
-        cp = os.path.join(self.directory, "cool_rates.out")
-        if os.path.exists(cp):
-            try:
-                self.cool = EnzoTable(cp, cool_out_key)
-            except:
-                pass
-
-        # Now fixes for different types of Hierarchies
-        # This includes changing the fieldinfo class!
-        if self["TopGridRank"] == 1: self._setup_1d()
-        elif self["TopGridRank"] == 2: self._setup_2d()
 
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
+            np.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
+            np.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
         self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0]])
+            np.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0]])
+            np.concatenate([self.domain_right_edge, [1.0]])
 
     def get_parameter(self,parameter,type=None):
         """
@@ -827,7 +810,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
@@ -842,17 +825,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
@@ -870,6 +853,11 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
     def _set_units(self):
         """
         Generates the conversion to various physical units based on the parameter file
@@ -937,7 +925,7 @@
         with fortran code.
         """
         k = {}
-        k["utim"] = 2.52e17/na.sqrt(self.omega_matter)\
+        k["utim"] = 2.52e17/np.sqrt(self.omega_matter)\
                        / self.hubble_constant \
                        / (1+self.parameters["CosmologyInitialRedshift"])**1.5
         k["urho"] = 1.88e-29 * self.omega_matter \
@@ -949,8 +937,8 @@
                (1.0 + self.current_redshift)
         k["uaye"] = 1.0/(1.0 + self.parameters["CosmologyInitialRedshift"])
         k["uvel"] = 1.225e7*self.parameters["CosmologyComovingBoxSize"] \
-                      *na.sqrt(self.omega_matter) \
-                      *na.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
+                      *np.sqrt(self.omega_matter) \
+                      *np.sqrt(1+ self.parameters["CosmologyInitialRedshift"])
         k["utem"] = 1.88e6 * (self.parameters["CosmologyComovingBoxSize"]**2) \
                       * self.omega_matter \
                       * (1.0 + self.parameters["CosmologyInitialRedshift"])
@@ -990,7 +978,7 @@
         self.conversion_factors.update(enzo.conversion_factors)
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
-                self.parameters[i] = na.array(self.parameters[i])
+                self.parameters[i] = np.array(self.parameters[i])
             if i.endswith("Units") and not i.startswith("Temperature"):
                 dataType = i[:-5]
                 self.conversion_factors[dataType] = self.parameters[i]
@@ -998,7 +986,7 @@
         self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
-                self.conversion_factors[i] = na.array(self.conversion_factors[i])
+                self.conversion_factors[i] = np.array(self.conversion_factors[i])
         for p, v in self._parameter_override.items():
             self.parameters[p] = v
         for p, v in self._conversion_override.items():


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
@@ -193,7 +193,7 @@
     # We can assume that we at least have Density
     # We should actually be guaranteeing the presence of a .shape attribute,
     # but I am not currently implementing that
-    fieldData = na.zeros(data["Density"].shape,
+    fieldData = np.zeros(data["Density"].shape,
                          dtype = data["Density"].dtype)
     if data.pf["MultiSpecies"] == 0:
         if data.has_field_parameter("mu"):
@@ -249,7 +249,7 @@
 KnownEnzoFields["z-velocity"].projection_conversion='1'
 
 def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+    return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
@@ -324,39 +324,39 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
     else:
-        filter = na.ones(data.NumberOfParticles, dtype='bool')
+        filter = np.ones(data.NumberOfParticles, dtype='bool')
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                           data["particle_position_y"][filter].astype(na.float64),
-                           data["particle_position_z"][filter].astype(na.float64),
-                           data["particle_mass"][filter].astype(na.float32),
-                           na.int64(na.where(filter)[0].size),
-                           blank, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter].astype(np.float32),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     return blank
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
@@ -367,28 +367,28 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           particle_field_data.astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           top, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           particle_field_data.astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           top, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                           data["particle_position_y"].astype(na.float64),
-                           data["particle_position_z"].astype(na.float64),
-                           data["particle_mass"].astype(na.float32),
-                           na.int64(data.NumberOfParticles),
-                           bottom, na.array(data.LeftEdge).astype(na.float64),
-                           na.array(data.ActiveDimensions).astype(na.int32), 
-                           na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
+                           data["particle_position_y"].astype(np.float64),
+                           data["particle_position_z"].astype(np.float64),
+                           data["particle_mass"].astype(np.float32),
+                           np.int64(data.NumberOfParticles),
+                           bottom, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -406,30 +406,30 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          particle_field_data.astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          top, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          particle_field_data.astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          top, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     del particle_field_data
 
-    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
-                          data["particle_position_y"][filter].astype(na.float64),
-                          data["particle_position_z"][filter].astype(na.float64),
-                          data["particle_mass"][filter].astype(na.float32),
-                          na.int64(na.where(filter)[0].size),
-                          bottom, na.array(data.LeftEdge).astype(na.float64),
-                          na.array(data.ActiveDimensions).astype(na.int32), 
-                          na.float64(data['dx']))
+    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                          data["particle_position_y"][filter].astype(np.float64),
+                          data["particle_position_z"][filter].astype(np.float64),
+                          data["particle_mass"][filter].astype(np.float32),
+                          np.int64(np.where(filter)[0].size),
+                          bottom, np.array(data.LeftEdge).astype(np.float64),
+                          np.array(data.ActiveDimensions).astype(np.int32), 
+                          np.float64(data['dx']))
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -466,7 +466,7 @@
           projection_conversion="1")
 
 def _StarAge(field, data):
-    star_age = na.zeros(data['StarCreationTimeYears'].shape)
+    star_age = np.zeros(data['StarCreationTimeYears'].shape)
     with_stars = data['StarCreationTimeYears'] > 0
     star_age[with_stars] = data.pf.time_units['years'] * \
         data.pf.current_time - \
@@ -485,7 +485,7 @@
 def _Bmag(field, data):
     """ Magnitude of the magnetic field vector bvec
     """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
+    return np.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
 add_field("Bmag", function=_Bmag,display_name=r"$|B|$",units=r"\rm{Gauss}")
 
@@ -495,7 +495,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         try:
             return io._read_data_set(data, p_field).astype(dtype)
         except io._read_exception:
@@ -555,13 +555,13 @@
 def _convertParticleMass(data):
     return data.convert("Density")*(data.convert("cm")**3.0)
 def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
     return cf
 def _convertParticleMassMsun(data):
     return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
 def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
     cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
     return cf
 add_field("ParticleMass",
@@ -584,7 +584,7 @@
     if data['dx'].size == 1:
         try:
             return data['dx']*data['dy']*\
-                na.ones(data.ActiveDimensions, dtype='float64')
+                np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
             return data['dx']*data['dy']
     return data["dx"]*data["dy"]
@@ -606,11 +606,10 @@
         Enzo2DFieldInfo["CellArea%s" % a]
 
 def _zvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_2d_field("z-velocity", function=_zvel)
 
-
 #
 # Now we do overrides for 1D fields
 #
@@ -638,7 +637,7 @@
         Enzo1DFieldInfo["CellLength%s" % a]
 
 def _yvel(field, data):
-    return na.zeros(data["x-velocity"].shape,
+    return np.zeros(data["x-velocity"].shape,
                     dtype='float64')
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
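
The star and particle fields above all share one normalization step:
CICDeposit_3 (a compiled yt routine, not reproduced here) fills a
mass-weighted numerator ("top") and a mass denominator ("bottom"),
and the average is taken only where mass was actually deposited. A
pure-numpy sketch of that final step, with made-up values:

    import numpy as np

    def weighted_cic_average(top, bottom):
        # Cells with no deposited mass stay zero rather than
        # dividing by zero, mirroring the top/bottom pattern above.
        out = top.copy()
        out[bottom == 0] = 0.0
        nz = bottom.nonzero()
        out[nz] /= bottom[nz]
        return out

    print(weighted_cic_average(np.array([2.0, 0.0, 6.0]),
                               np.array([1.0, 0.0, 3.0])))  # [2. 0. 2.]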


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -25,7 +25,7 @@
 
 from yt.funcs import *
 
-import numpy as na
+import numpy as np
 import glob
 import os
 
@@ -236,8 +236,8 @@
             else:
                 my_final_time = self.final_time
 
-            my_times = na.array(map(lambda a:a['time'], my_all_outputs))
-            my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+            my_times = np.array(map(lambda a:a['time'], my_all_outputs))
+            my_indices = np.digitize([my_initial_time, my_final_time], my_times)
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
@@ -294,7 +294,7 @@
             elif len(vals) == 1:
                 vals = pcast(vals[0])
             else:
-                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+                vals = np.array([pcast(i) for i in vals if i != "-99999"])
             self.parameters[param] = vals
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
@@ -303,17 +303,17 @@
             if len(self.domain_dimensions) < 3:
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
-                self.domain_dimensions = na.array(tmp)
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                self.domain_dimensions = np.array(tmp)
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64").copy()
         else:
-            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+            self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64")
-            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+            self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
-            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+            self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
 
         if self.parameters["ComovingCoordinates"]:
             cosmo_attr = {'box_size': 'CosmologyComovingBoxSize',
@@ -374,7 +374,7 @@
                     current_time * self.enzo_cosmology.TimeUnits)
 
             self.all_time_outputs.append(output)
-            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
             current_time += self.parameters['dtDataDump']
             index += 1
 
@@ -476,8 +476,8 @@
         self.parameters['RedshiftDumpDir'] = "RD"
         self.parameters['ComovingCoordinates'] = 0
         self.parameters['TopGridRank'] = 3
-        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
-        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
         self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
@@ -491,7 +491,7 @@
 
         self.time_units = {}
         if self.cosmological_simulation:
-            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+            self.parameters['TimeUnits'] = 2.52e17 / np.sqrt(self.omega_matter) \
                 / self.hubble_constant / (1 + self.initial_redshift)**1.5
         self.time_units['1'] = 1.
         self.time_units['seconds'] = self.parameters['TimeUnits']
@@ -586,8 +586,8 @@
             outputs = self.all_outputs
         my_outputs = []
         for value in values:
-            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
-            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+            outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
@@ -649,7 +649,7 @@
 
         """
 
-        times = na.array(times) / self.time_units[time_units]
+        times = np.array(times) / self.time_units[time_units]
         return self._get_outputs_by_key('time', times, tolerance=tolerance,
                                         outputs=outputs)
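
The output selection above brackets [initial, final] with np.digitize
and widens the left edge on an exact match. A small self-contained
sketch with made-up times (my_times stands in for the array built
from my_all_outputs):

    import numpy as np

    my_times = np.array([0.0, 1.0, 2.0, 3.0])
    my_initial_time, my_final_time = 1.0, 2.5
    my_indices = np.digitize([my_initial_time, my_final_time], my_times)
    # my_indices -> [2, 3]
    if my_initial_time == my_times[my_indices[0] - 1]:
        my_indices[0] -= 1          # include the exact left-edge match
    print(my_times[my_indices[0]:my_indices[1]])   # -> [1. 2.]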
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -25,7 +25,7 @@
 
 import h5py
 import stat
-import numpy as na
+import numpy as np
 import weakref
 
 from yt.funcs import *
@@ -42,7 +42,7 @@
 from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
-     ValidateDataField
+     ValidateDataField, TranslationFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -70,7 +70,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -123,36 +123,39 @@
             self.grid_particle_count[:] = f["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
-        self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
-        na.add.accumulate(self.grid_particle_count.squeeze(),
+        self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
+        np.add.accumulate(self.grid_particle_count.squeeze(),
                           out=self._particle_indices[1:])
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
         self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
         
 
         # This is a possibly slow and verbose fix, and should be re-examined!
-        rdx = (self.parameter_file.domain_right_edge -
-                self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+        rdx = (self.parameter_file.domain_width /
+                self.parameter_file.domain_dimensions)
         nlevels = self.grid_levels.max()
-        dxs = na.zeros((nlevels+1,3),dtype='float64')
+        dxs = np.ones((nlevels+1,3),dtype='float64')
         for i in range(nlevels+1):
-            dxs[i] = rdx/self.parameter_file.refine_by**i
+            dxs[i,:ND] = rdx[:ND]/self.parameter_file.refine_by**i
        
+        if ND < 3:
+            dxs[:,ND:] = rdx[ND:]
+
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = na.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = na.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
         
         offset = 7
-        ii = na.argsort(self.grid_levels.flat)
+        ii = np.argsort(self.grid_levels.flat)
         gid = self._handle["/gid"][:]
         first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
         for g in self.grids[ii].flat:
@@ -184,11 +187,16 @@
                 self.derived_field_list.append(field)
             if (field not in KnownFLASHFields and
                 field.startswith("particle")) :
-                self.parameter_file.field_info.add_field(field,
-                                                         function=NullFunc,
-                                                         take_log=False,
-                                                         validators = [ValidateDataField(field)],
-                                                         particle_type=True)
+                self.parameter_file.field_info.add_field(
+                        field, function=NullFunc, take_log=False,
+                        validators = [ValidateDataField(field)],
+                        particle_type=True)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
                 
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
@@ -203,6 +211,7 @@
                  storage_filename = None,
                  conversion_override = None):
 
+        if self._handle is not None: return
         self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
@@ -364,9 +373,9 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = na.array(
+        self.domain_left_edge = np.array(
             [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = na.array(
+        self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         self.min_level = self.parameters.get("lrefine_min", 1) - 1
 
@@ -392,7 +401,7 @@
         nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
-            na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+            np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:
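
The FLASH fix above snaps grid edges to integer multiples of each
level's cell width, but only over the ND real dimensions (ND is
defined outside the hunk shown). A sketch assuming 2D data:

    import numpy as np

    ND = 2                                    # assumed dimensionality
    dx = np.array([0.25, 0.25, 1.0])          # cell width on this level
    left_edge = np.array([0.2501, 0.4999, 0.0])
    left_edge[:ND] = np.rint(left_edge[:ND] / dx[:ND]) * dx[:ND]
    print(left_edge)                          # -> [0.25 0.5  0.  ]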


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import h5py
 
 from yt.utilities.io_handler import \
@@ -54,7 +54,7 @@
     def _read_data_set(self, grid, field):
         f = self._handle
         if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return na.array([], dtype='float64')
+            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
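
The start/end offsets used here come from the _particle_indices
prefix sum built with np.add.accumulate in the FLASH hierarchy diff
above; grid g owns the slice [indices[g], indices[g+1]) of the flat
particle arrays. A sketch with sample counts:

    import numpy as np

    counts = np.array([3, 0, 2], dtype="int64")   # particles per grid
    indices = np.zeros(counts.size + 1, dtype="int64")
    np.add.accumulate(counts, out=indices[1:])
    # indices -> [0, 3, 3, 5]; grid 2 owns particles[3:5]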


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -26,7 +26,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 from itertools import izip
 
 from yt.funcs import *
@@ -104,7 +104,7 @@
         
     def _parse_hierarchy(self):
         f = self._handle # shortcut
-        npa = na.array
+        npa = np.array
         DLE = self.parameter_file.domain_left_edge
         DRE = self.parameter_file.domain_right_edge
         DW = (DRE - DLE)
@@ -119,12 +119,12 @@
                                 + dxs *(1 + self.grid_dimensions)
         self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
         grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = na.max(self.grid_levels)
+        self.max_level = np.max(self.grid_levels)
         
         args = izip(xrange(self.num_grids), self.grid_levels.flat,
                     grid_parent_id, LI,
                     self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = na.empty(len(args), dtype='object')
+        self.grids = np.empty(len(args), dtype='object')
         for gi, (j,lvl,p, le, d, n) in enumerate(args):
             self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
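
Here self.grids is an object array: np.empty(..., dtype='object')
holds arbitrary Python instances while keeping numpy-style masking
and fancy indexing. A sketch with dicts standing in for grid objects:

    import numpy as np

    grids = np.empty(3, dtype="object")
    for i in range(3):
        grids[i] = {"id": i}           # stand-in for a grid instance
    mask = np.array([True, False, True])
    print(grids[mask])                 # -> [{'id': 0} {'id': 2}]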
         


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,9 +38,9 @@
             address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
             data.append(fh[address][:])
         if len(data) > 0:
-            data = na.concatenate(data)
+            data = np.concatenate(data)
         fh.close()
-        return na.array(data)
+        return np.array(data)
     def _read_field_names(self,grid): 
         adr = grid.Address
         fh = h5py.File(grid.filename,mode='r')


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -28,7 +28,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -71,7 +71,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -108,11 +108,11 @@
     def _parse_hierarchy(self):
         f = self._fhandle
         dxs = []
-        self.grids = na.empty(self.num_grids, dtype='object')
+        self.grids = np.empty(self.num_grids, dtype='object')
         levels = (f['grid_level'][:]).copy()
         glis = (f['grid_left_index'][:]).copy()
         gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((na.max(gdims, axis=0) == 1) &
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
                         (self.parameter_file.domain_dimensions == 1))
 
         for i in range(levels.shape[0]):
@@ -125,7 +125,7 @@
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
             dxs.append(dx)
-        dx = na.array(dxs)
+        dx = np.array(dxs)
         self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -147,7 +147,7 @@
         self.derived_field_list = []
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
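
The active_dims mask above marks a dimension inactive when every grid
and the domain itself are one cell wide there, so refinement never
divides its cell width. A sketch with sample shapes:

    import numpy as np

    gdims = np.array([[8, 8, 1], [16, 16, 1]])    # per-grid dimensions
    domain_dimensions = np.array([8, 8, 1])
    active_dims = ~((np.max(gdims, axis=0) == 1) &
                    (domain_dimensions == 1))
    print(active_dims)                            # -> [ True  True False]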


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -28,7 +28,7 @@
 import re
 import os
 import weakref
-import numpy as na
+import numpy as np
 
 from collections import \
     defaultdict
@@ -110,7 +110,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -170,9 +170,9 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
         self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
         counter += 1
@@ -181,9 +181,9 @@
         counter += 1 # unused line in Maestro BoxLib
         
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
 
         counter += self.n_levels # unused line in Maestro BoxLib
         
@@ -259,8 +259,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -304,17 +304,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
         self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -325,9 +325,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -354,10 +354,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -367,7 +367,7 @@
                 fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -381,11 +381,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -494,9 +494,9 @@
                 t = parameterTypes[paramName](val)
                 exec("self.%s = %s" % (paramName,t))
 
-        self.domain_dimensions = na.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = na.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = na.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
+        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
+        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
+        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
         
         self.cosmological_simulation = self.current_redshift = \
             self.omega_matter = self.omega_lambda = self.hubble_constant = 0
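
The exec-based parameter assignment in the header parser above is
often written with setattr instead, which avoids evaluating strings;
a sketch with illustrative names:

    class Params:
        pass

    p = Params()
    for name, value in [("n_cellx", 64), ("prob_lo_x", 0.0)]:
        setattr(p, name, value)
    print(p.n_cellx, p.prob_lo_x)      # -> 64 0.0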


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -28,7 +28,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -72,8 +72,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -113,7 +113,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
 
         # we can/should also check against the max and min in the header file
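
BoxLib FABs are stored in Fortran order, so the flat read above is
reshaped with reversed dimensions and the axes swapped back. That is
equivalent to a Fortran-order reshape, as this sketch checks:

    import numpy as np

    dims = (2, 3, 4)                   # ActiveDimensions stand-in
    flat = np.arange(np.prod(dims), dtype="float64")
    field = flat.reshape(dims[::-1]).swapaxes(0, 2)
    assert field.shape == dims
    assert np.array_equal(field, flat.reshape(dims, order="F"))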


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -35,7 +35,7 @@
 from string import strip, rstrip
 import weakref
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import AMRGridPatch
@@ -108,7 +108,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE - LE) / self.ActiveDimensions)
+            self.dds = np.array((RE - LE) / self.ActiveDimensions)
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
@@ -172,20 +172,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #np.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
         self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
-        self.dx = na.zeros((self.n_levels, 3))
+        self.dx = np.zeros((self.n_levels, 3))
         for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
-            self.dx[i] = na.array(map(float, line.split()))
+            self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
@@ -269,8 +269,8 @@
                 counter += 1
                 zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                lo = na.array([xlo, ylo, zlo])
-                hi = na.array([xhi, yhi, zhi])
+                lo = np.array([xlo, ylo, zlo])
+                hi = np.array([xhi, yhi, zhi])
                 dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
                                              level, gfn, gfo, dims, start, stop,
@@ -290,7 +290,7 @@
     def read_particle_header(self):
         # We need to get particle offsets and particle counts
         if not self.parameter_file.use_particles:
-            self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         self.field_list += nyx_particle_field_names[:]
         header = open(os.path.join(self.parameter_file.path, "DM", "Header"))
@@ -304,7 +304,7 @@
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel + 1):dummy = header.readline()
 
-        grid_info = na.fromiter((int(i) for line in header.readlines()
+        grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
@@ -341,15 +341,15 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self, start_stop):
-        start = na.array(map(int, start_stop[0].split(',')))
-        stop = na.array(map(int, start_stop[1].split(',')))
+        start = np.array(map(int, start_stop[0].split(',')))
+        stop = np.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension, start, stop
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
 
-        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.grids = np.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.path
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(basedir, "DM",
@@ -361,9 +361,9 @@
         self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
         del self.pgrid_info
 
-        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
+        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
         self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
@@ -378,9 +378,9 @@
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
 
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -398,7 +398,7 @@
             grid._setup_dx()
 
     def __setup_grid_tree(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
                                 self.grid_left_edge, self.grid_right_edge,
@@ -415,10 +415,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level + 1)).flat)
         return self.grids[mask]
 
     def _setup_field_list(self):
@@ -444,11 +444,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids, 3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids, 3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids, 3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids, 1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids, 1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids, 3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids, 3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids, 3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids, 1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids, 1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -464,7 +464,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -607,9 +607,9 @@
                         self.parameters[param_name] = vals
 
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = na.array([float(i) for i in vals])
+                self.domain_right_edge = np.array([float(i) for i in vals])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = na.array([float(i) for i in vals])
+                self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -27,7 +27,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
@@ -46,7 +46,7 @@
         offset = grid._particle_offset
         filen = os.path.expanduser(grid.particle_filename)
         off = grid._particle_offset
-        tr = na.zeros(grid.NumberOfParticles, dtype='float64')
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
         read_castro_particles(filen, off,
                               nyx_particle_field_names.index(field),
                               len(nyx_particle_field_names), tr)
@@ -68,7 +68,7 @@
         offset2 = int(nElements*bytesPerReal*field_index)
 
         dtype = grid.hierarchy._dtype
-        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
         read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -31,7 +31,7 @@
 from string import strip, rstrip
 from stat import ST_CTIME
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
@@ -107,7 +107,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -139,7 +139,7 @@
         simply add it to the if/elif/else block.
 
         """
-        self.grid_particle_count = na.zeros(len(self.grids))
+        self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
             fn = os.path.join(self.pf.fullplotdir, particle_filename)
@@ -160,18 +160,18 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=na.ones(self.num_grids)
+                mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = na.where(mask == 1)
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
                 # so, we want to assign the particle to the finest of
                 # the grids we just found
                 if len(selected_grids) != 0:
                     grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
+                    ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
@@ -211,20 +211,20 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int,self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
         counter += 1
         self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
         #domain_re.search(self.__global_header_lines[counter]).groups()
         counter += 1
         self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+        self.dx = np.zeros((self.n_levels,3))
         for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = na.array(map(float,line.split()))
+            self.dx[i] = np.array(map(float,line.split()))
         counter += self.n_levels
         self.geometry = int(self.__global_header_lines[counter])
         if self.geometry != 0:
@@ -302,8 +302,8 @@
                 counter+=1
                 zlo,zhi = map(float,self.__global_header_lines[counter].split())
                 counter+=1
-                lo = na.array([xlo,ylo,zlo])
-                hi = na.array([xhi,yhi,zhi])
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
                 dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
                 self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
                 grid_counter += 1 # this is global, and shouldn't be reset
@@ -347,17 +347,17 @@
         self._dtype = dtype
 
     def __calculate_grid_dimensions(self,start_stop):
-        start = na.array(map(int,start_stop[0].split(',')))
-        stop = na.array(map(int,start_stop[1].split(',')))
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
         return dimension,start,stop
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = na.concatenate([level.grids for level in self.levels])
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = na.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
@@ -368,9 +368,9 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = na.array(left_edges)
-        self.grid_right_edge = na.array(right_edges)
-        self.grid_dimensions = na.array(dims)
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
@@ -399,10 +399,10 @@
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
-        mask = na.zeros(self.num_grids, dtype='bool')
+        mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
-        mask = na.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
         return self.grids[mask]
 
     def _count_grids(self):
@@ -413,11 +413,11 @@
 
     def _initialize_grid_arrays(self):
         mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = na.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = na.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = na.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = na.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = na.zeros((self.num_grids,1), 'int32')
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
 
     def _parse_hierarchy(self):
         pass
@@ -551,14 +551,14 @@
                 
             elif param.startswith("geometry.prob_hi"):
                 self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                    np.array([float(i) for i in vals.split()])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = na.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
         self.refine_by = self.parameters["RefineBy"]
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):

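The np.choose/np.greater idiom in the particle-counting loop above is a
vectorized point-in-box test against every grid at once: a grid survives
only if the particle lies right of its left edge and left of its right
edge on all three axes.  The same mask logic in isolation (array names
follow the hierarchy attributes used above):

    import numpy as np

    def grids_containing(coord, grid_left_edge, grid_right_edge):
        mask = np.ones(grid_left_edge.shape[0])
        for i in range(3):
            # zero out grids whose left edge lies beyond the particle...
            np.choose(np.greater(grid_left_edge[:, i], coord[i]),
                      (mask, 0), mask)
            # ...and grids whose right edge falls short of it
            np.choose(np.greater(grid_right_edge[:, i], coord[i]),
                      (0, mask), mask)
        return np.where(mask == 1)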

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.utilities.physical_constants import \
     mh, kboltz
@@ -146,7 +146,7 @@
     def _Particles(field, data):
         io = data.hierarchy.io
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
+            return np.array([], dtype=dtype)
         else:
             return io._read_particles(data, p_field).astype(dtype)
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -25,7 +25,7 @@
 """
 
 import os
-import numpy as na
+import numpy as np
 from yt.utilities.io_handler import \
            BaseIOHandler
 
@@ -76,7 +76,7 @@
                     if ( (grid.LeftEdge < coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
-        return na.array(particles)
+        return np.array(particles)
 
     def _read_data_set(self,grid,field):
         """
@@ -109,8 +109,8 @@
             dtype += ('f%i'% bytesPerReal) #always a floating point
 
             # determine size of FAB
-            start = na.array(map(int,start.split(',')))
-            stop = na.array(map(int,stop.split(',')))
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
 
             gridSize = stop - start + 1
 
@@ -150,7 +150,7 @@
             fieldname = field
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile,count=nElements,dtype=dtype)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import stat
 import weakref
 
@@ -79,7 +79,7 @@
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -93,10 +93,10 @@
             return self.start_index
         if len(self.Parent) == 0:
             start_index = self.LeftEdge / self.dds
-            return na.rint(start_index).astype('int64').ravel()
+            return np.rint(start_index).astype('int64').ravel()
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
         self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
@@ -116,7 +116,7 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.tree_proxy = pf.ramses_tree
 
-        self.float_type = na.float64
+        self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
     def _initialize_data_storage(self):
@@ -153,12 +153,12 @@
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         level_info = self.tree_proxy.count_zones()
         num_ogrids = sum(level_info)
-        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
-        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        ogrid_hilbert_indices = na.zeros(num_ogrids, dtype='uint64')
-        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        ogrid_left_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = np.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = np.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = np.zeros((num_ogrids,6), dtype='int64')
+        ogrid_hilbert_indices = np.zeros(num_ogrids, dtype='uint64')
+        ochild_masks = np.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
             self.pf.domain_dimensions,
             ogrid_left_edge, ogrid_right_edge,
@@ -180,7 +180,7 @@
             if level_info[level] == 0: continue
             # Get the indices of grids on this level
             ggi = (ogrid_levels == level).ravel()
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2 
+            dims = np.ones((ggi.sum(), 3), dtype='int64') * 2 
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             fl = ogrid_file_locations[ggi,:]
@@ -189,7 +189,7 @@
             # We want grids that cover no more than MAX_EDGE cells in every direction
             psgs = []
             # left_index is integers of the index, with respect to this level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
+            left_index = np.rint((ogrid_left_edge[ggi,:]) * nd / DW ).astype('int64')
             # we've got octs, so it's +2
             pbar = get_pbar("Re-gridding ", left_index.shape[0])
             dlp = [None, None, None]
@@ -203,18 +203,18 @@
             #print level, hilbert_indices.min(), hilbert_indices.max()
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
-            unique_indices = na.unique(hilbert_indices)
+            unique_indices = np.unique(hilbert_indices)
             mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
             for ddleft_index, ddfl in zip(lefts, locs):
-                for idomain in na.unique(ddfl[:,0]):
+                for idomain in np.unique(ddfl[:,0]):
                     dom_ind = ddfl[:,0] == idomain
                     dleft_index = ddleft_index[dom_ind,:]
                     dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
                     psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                     dleft_index, dfl)
                     if psg.efficiency <= 0: continue
@@ -226,12 +226,12 @@
             pbar.finish()
             self.proto_grids.append(psgs)
             print sum(len(psg.grid_file_locations) for psg in psgs)
-            sums = na.zeros(3, dtype='int64')
+            sums = np.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             #for g in self.proto_grids[level]:
             #    sums += [s.sum() for s in g.sigs]
-            #assert(na.all(sums == dims.prod(axis=1).sum()))
+            #assert(np.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
     def _parse_hierarchy(self):
@@ -251,11 +251,11 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.proto_grids = []
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
+        mask = np.empty(self.grids.size, dtype='int32')
         print self.grid_levels.dtype
         for gi,g in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[gi,:],
@@ -346,10 +346,10 @@
         rheader = self.ramses_tree.get_file_info()
         self.parameters.update(rheader)
         self.current_time = self.parameters['time'] * self.parameters['unit_t']
-        self.domain_right_edge = na.ones(3, dtype='float64') \
+        self.domain_right_edge = np.ones(3, dtype='float64') \
                                            * rheader['boxlen']
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_dimensions = np.ones(3, dtype='int32') * 2
         # This is likely not true, but I am not sure how to otherwise
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")

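A quick worked example of the left_index computation in the re-gridding
loop above: nd is the cell count per axis at the current level and DW the
domain width, so each oct's floating-point left edge snaps to an integer
cell index (the values below are made up):

    import numpy as np

    DW = 1.0                                  # domain width in code units
    nd = 64                                   # domain_dimensions * 2**level
    ogrid_left_edge = np.array([0.0, 0.25, 0.515625])
    left_index = np.rint(ogrid_left_edge * nd / DW).astype('int64')
    # -> array([ 0, 16, 33]); each oct then spans 2 cells, hence the +2
    # when idims is computed above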

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 """
 
 from collections import defaultdict
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -38,8 +38,8 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
 
     def _read_data_set(self, grid, field):
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = np.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = np.zeros(grid.ActiveDimensions, dtype='int32')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -24,7 +24,7 @@
 """
 
 import weakref
-import numpy as na
+import numpy as np
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
@@ -71,7 +73,7 @@
         my_ind = self.id - self._id_offset
         le = self.LeftEdge
         self.dds = self.Parent.dds/rf
-        ParentLeftIndex = na.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
+        ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)
         self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')
         self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex
         self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds
@@ -152,7 +154,6 @@
             self.pf.field_info.add_field(
                     field, lambda a, b: None,
                     convert_function=cf, take_log=False)
-            
 
     def _parse_hierarchy(self):
         self.grid_dimensions = self.stream_handler.dimensions
@@ -180,7 +181,7 @@
             self._reconstruct_parent_child()
         self.max_level = self.grid_levels.max()
         mylog.debug("Preparing grids")
-        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid.filename = None
@@ -191,7 +192,7 @@
         mylog.debug("Prepared")
 
     def _reconstruct_parent_child(self):
-        mask = na.empty(len(self.grids), dtype='int32')
+        mask = np.empty(len(self.grids), dtype='int32')
         mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
             get_box_grids_level(self.grid_left_edge[i,:],
@@ -199,7 +200,7 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = na.where(mask.astype("bool"))
+            ids = np.where(mask.astype("bool"))
             grid._children_ids = ids[0] # where is a tuple
         mylog.debug("Second pass; identifying parents")
         for i, grid in enumerate(self.grids): # Second pass
@@ -208,7 +209,7 @@
 
     def _initialize_grid_arrays(self):
         AMRHierarchy._initialize_grid_arrays(self)
-        self.grid_procs = na.zeros((self.num_grids,1),'int32')
+        self.grid_procs = np.zeros((self.num_grids,1),'int32')
 
     def save_data(self, *args, **kwargs):
         pass
@@ -224,7 +225,7 @@
                             pf = self.parameter_file)
             except:
                 continue
-            available = na.all([f in self.field_list for f in fd.requested])
+            available = np.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -296,8 +297,8 @@
     @property
     def all_fields(self): return self[0].keys()
 
-def load_uniform_grid(data, domain_dimensions, domain_size_in_cm,
-                      sim_time=0.0, number_of_particles=0):
+def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                      nprocs=1, sim_time=0.0, number_of_particles=0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -313,55 +314,66 @@
     ----------
     data : dict
         This is a dict of numpy arrays, where the keys are the field names.
-    domain_dimensiosn : array_like
+    domain_dimensions : array_like
         This is the domain dimensions of the grid
-    domain_size_in_cm : float
-        The size of the domain, in centimeters
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    nprocs : integer, optional
+        If greater than 1, decompose the data into this number of subarrays
     sim_time : float, optional
         The simulation time in seconds
     number_of_particles : int, optional
         If particle fields are included, set this to the number of particles
-        
+
     Examples
     --------
 
-    >>> arr = na.random.random((256, 256, 256))
+    >>> arr = np.random.random((128, 128, 129))
     >>> data = dict(Density = arr)
-    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
-                
+    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+    >>> pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
     """
+
+    domain_dimensions = np.array(domain_dimensions)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
     sfh = StreamDictFieldHandler()
-    sfh.update({0:data})
-    domain_dimensions = na.array(domain_dimensions)
-    if na.unique(domain_dimensions).size != 1:
-        print "We don't support variably sized domains yet."
-        raise RuntimeError
-    domain_left_edge = na.zeros(3, 'float64')
-    domain_right_edge = na.ones(3, 'float64')
-    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
-    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
 
-    grid_levels = na.array([0], dtype='int32').reshape((1,1))
-    grid_dimensions = grid_right_edges - grid_left_edges
-
-    grid_left_edges  = grid_left_edges.astype("float64")
-    grid_left_edges /= domain_dimensions*2**grid_levels
-    grid_left_edges *= domain_right_edge - domain_left_edge
-    grid_left_edges += domain_left_edge
-
-    grid_right_edges  = grid_right_edges.astype("float64")
-    grid_right_edges /= domain_dimensions*2**grid_levels
-    grid_right_edges *= domain_right_edge - domain_left_edge
-    grid_right_edges += domain_left_edge
+    if nprocs > 1:
+        temp = {}
+        new_data = {}
+        for key in data.keys():
+            psize = get_psize(np.array(data[key].shape), nprocs)
+            grid_left_edges, grid_right_edges, temp[key] = \
+                decompose_array(data[key], psize, bbox)
+            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+        for gid in range(nprocs):
+            new_data[gid] = {}
+            for key in temp.keys():
+                new_data[gid].update({key:temp[key][gid]})
+        sfh.update(new_data)
+        del new_data, temp
+    else:
+        sfh.update({0:data})
+        grid_left_edges = domain_left_edge
+        grid_right_edges = domain_right_edge
+        grid_dimensions = domain_dimensions.reshape(nprocs,3)
 
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
-        na.array([-1], dtype='int64'),
-        number_of_particles*na.ones(1, dtype='int64').reshape((1,1)),
-        na.zeros(1).reshape((1,1)),
+        -np.ones(nprocs, dtype='int64'),
+        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
     )
 
@@ -375,10 +387,10 @@
     handler.cosmology_simulation = 0
 
     spf = StreamStaticOutput(handler)
-    spf.units["cm"] = domain_size_in_cm
+    spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0
-    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf

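When nprocs > 1 the new branch above decomposes every field array into
nprocs bricks via get_psize/decompose_array and then regroups them by
grid id, so each stream grid carries one brick of every field.  A sketch
of the regrouping step with stand-in data (the sub-array shapes are
illustrative; in practice they come out of decompose_array):

    import numpy as np

    nprocs = 4
    # stand-in for decompose_array output: field -> list of nprocs bricks
    temp = {"Density": [np.empty((64, 64, 16)) for _ in range(nprocs)]}
    new_data = {}
    for gid in range(nprocs):
        new_data[gid] = {}
        for key in temp.keys():
            new_data[gid].update({key: temp[key][gid]})
    # new_data[gid] now holds one brick of every field for grid gid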

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -44,15 +44,15 @@
         self.RightEdge = right_edge
         self.Level = 0
         self.NumberOfParticles = 0
-        self.left_dims = na.array(left_dims, dtype='int32')
-        self.right_dims = na.array(right_dims, dtype='int32')
+        self.left_dims = np.array(left_dims, dtype='int32')
+        self.right_dims = np.array(right_dims, dtype='int32')
         self.ActiveDimensions = self.right_dims - self.left_dims
         self.Parent = None
         self.Children = []
 
     @property
     def child_mask(self):
-        return na.ones(self.ActiveDimensions, dtype='int32')
+        return np.ones(self.ActiveDimensions, dtype='int32')
 
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -70,7 +70,7 @@
         # Tiger is unigrid
         self.ngdims = [i/j for i,j in
                 izip(self.pf.root_size, self.pf.max_grid_size)]
-        self.num_grids = na.prod(self.ngdims)
+        self.num_grids = np.prod(self.ngdims)
         self.max_level = 0
 
     def _setup_classes(self):
@@ -87,18 +87,18 @@
         DW = DRE - DLE
         gds = DW / self.ngdims
         rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
-        glx, gly, glz = na.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
+        glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
                                  DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
                                  DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
-        gdx, gdy, gdz = na.mgrid[0:rd[0]:self.ngdims[0]*1j,
+        gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
                                  0:rd[1]:self.ngdims[1]*1j,
                                  0:rd[2]:self.ngdims[2]*1j]
         LE, RE, levels, counts = [], [], [], []
         i = 0
         for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
                                izip(gdx.flat, gdy.flat, gdz.flat)):
-            gld = na.array(gldi)
-            gle = na.array(glei)
+            gld = np.array(gldi)
+            gle = np.array(glei)
             gre = gle + gds
             g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
             grids.append(g)
@@ -108,13 +108,13 @@
             levels.append(g.Level)
             counts.append(g.NumberOfParticles)
             i += 1
-        self.grids = na.empty(len(grids), dtype='object')
+        self.grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
-        self.grid_dimensions[:] = na.array(dims, dtype='int64')
-        self.grid_left_edge[:] = na.array(LE, dtype='float64')
-        self.grid_right_edge[:] = na.array(RE, dtype='float64')
-        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
-        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+        self.grid_dimensions[:] = np.array(dims, dtype='int64')
+        self.grid_left_edge[:] = np.array(LE, dtype='float64')
+        self.grid_right_edge[:] = np.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = np.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
 
     def _populate_grid_objects(self):
         # We don't need to do anything here
@@ -186,8 +186,8 @@
         self.parameters['RefineBy'] = 2
 
     def _set_units(self):
-        self.domain_left_edge = na.zeros(3, dtype='float64')
-        self.domain_right_edge = na.ones(3, dtype='float64')
+        self.domain_left_edge = np.zeros(3, dtype='float64')
+        self.domain_right_edge = np.ones(3, dtype='float64')
         self.units = {}
         self.time_units = {}
         self.time_units['1'] = 1

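The complex step in np.mgrid above is numpy's "this many points, endpoints
inclusive" convention, which is what lets the lattice of grid left corners
be built in one shot; the one-dimensional case makes it plain:

    import numpy as np

    ramp = np.mgrid[0.0:1.0:5j]   # -> [0., 0.25, 0.5, 0.75, 1.]
    # equivalent to np.linspace(0.0, 1.0, 5); the three-axis form used
    # above produces the x, y, z corner coordinates of every grid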

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -36,17 +36,17 @@
 
     def _read_data_set(self, grid, field):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64')
-        SS = na.array(grid.ActiveDimensions, dtype='int64')
-        RS = na.array(grid.pf.root_size, dtype='int64')
+        LD = np.array(grid.left_dims, dtype='int64')
+        SS = np.array(grid.ActiveDimensions, dtype='int64')
+        RS = np.array(grid.pf.root_size, dtype='int64')
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
 
     def _read_data_slice(self, grid, field, axis, coord):
         fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
-        LD = na.array(grid.left_dims, dtype='int64').copy()
-        SS = na.array(grid.ActiveDimensions, dtype='int64').copy()
-        RS = na.array(grid.pf.root_size, dtype='int64').copy()
+        LD = np.array(grid.left_dims, dtype='int64').copy()
+        SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
+        RS = np.array(grid.pf.root_size, dtype='int64').copy()
         LD[axis] += coord
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")

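_read_data_slice reuses the full-grid reader by collapsing one axis:
shift the left corner to the slice coordinate and request a one-cell-thick
block.  The index arithmetic in isolation (the actual I/O stays in
au.read_tiger_section):

    import numpy as np

    LD = np.array([0, 0, 0], dtype='int64')     # grid left corner, in cells
    SS = np.array([16, 16, 16], dtype='int64')  # cells to read per axis
    axis, coord = 2, 7                          # z-slice through plane 7
    LD[axis] += coord
    SS[axis] = 1    # the reader now returns a 16 x 16 x 1 block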

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/opengl_widgets/mip_viewer.py
--- a/yt/gui/opengl_widgets/mip_viewer.py
+++ b/yt/gui/opengl_widgets/mip_viewer.py
@@ -31,7 +31,7 @@
 import OpenGL.GL.ARB.framebuffer_object as GL_fbo
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 from small_apps import ViewHandler3D, GenericGLUTScene
@@ -85,8 +85,8 @@
                     yield s[v][i]
 
     def _get_texture_vertices(self):
-        vs = [na.zeros(3, dtype='float32'),
-              na.ones(3, dtype='float32')]
+        vs = [np.zeros(3, dtype='float32'),
+              np.ones(3, dtype='float32')]
         #vs.reverse()
         for b in self.hv.bricks:
             shape = b.my_data[0].shape
@@ -126,7 +126,7 @@
 
         DW = self.hv.pf.domain_right_edge - self.hv.pf.domain_left_edge
         dds = ((brick.RightEdge - brick.LeftEdge) /
-               (na.array([ix,iy,iz], dtype='float32')-1)) / DW
+               (np.array([ix,iy,iz], dtype='float32')-1)) / DW
         BLE = brick.LeftEdge / DW - 0.5
         self._brick_textures.append(
             (id_field, (ix-1,iy-1,iz-1), dds, BLE))
@@ -135,7 +135,7 @@
 
     def _setup_colormap(self):
 
-        buffer = na.mgrid[0.0:1.0:256j]
+        buffer = np.mgrid[0.0:1.0:256j]
         colors = map_to_colors(buffer, "algae")
         
         GL.glActiveTexture(GL.GL_TEXTURE1)
@@ -165,17 +165,17 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(hv.bricks) * 6 * 4
-        self.v = na.fromiter(self._get_brick_vertices(offset),
+        self.v = np.fromiter(self._get_brick_vertices(offset),
                              dtype = 'float32', count = num * 3)
         self.vertices = vbo.VBO(self.v)
 
-        self.t = na.fromiter(self._get_texture_vertices(),
+        self.t = np.fromiter(self._get_texture_vertices(),
                              dtype = 'float32', count = num * 3)
         self.tvertices = vbo.VBO(self.t)
 
         self.ng = len(hv.bricks)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_bricks()
@@ -373,8 +373,8 @@
 
     def reset_view(self):   
         print "RESETTING"
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float') + 30
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float') + 30
         self.position[2] = -2 # Offset backwards a bit
 
     def translate(self, axis, value):


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/opengl_widgets/small_apps.py
--- a/yt/gui/opengl_widgets/small_apps.py
+++ b/yt/gui/opengl_widgets/small_apps.py
@@ -30,7 +30,7 @@
 from OpenGL.arrays import vbo, ArrayDatatype
 import Image
 import glob
-import numpy as na
+import numpy as np
 import time
 
 ESCAPE = '\033'
@@ -235,7 +235,7 @@
 
     @classmethod
     def from_image_file(cls, fn, tex_unit = GL.GL_TEXTURE0):
-        buffer = na.array(Image.open(fn))
+        buffer = np.array(Image.open(fn))
         print "Uploading buffer", buffer.min(), buffer.max(), buffer.shape, buffer.dtype
         obj = cls(tex_unit)
         obj.upload_image(buffer)
@@ -260,8 +260,8 @@
     @classmethod
     def from_image_files(cls, left_fn, right_fn, tex_unit = GL.GL_TEXTURE0):
         print "Uploading pairs from %s and %s" % (left_fn, right_fn)
-        left_buffer = na.array(Image.open(left_fn))
-        right_buffer = na.array(Image.open(right_fn))
+        left_buffer = np.array(Image.open(left_fn))
+        right_buffer = np.array(Image.open(right_fn))
         obj = cls(tex_unit)
         obj.left_image.upload_image(left_buffer)
         obj.right_image.upload_image(right_buffer)
@@ -294,7 +294,7 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
@@ -408,7 +408,7 @@
 
         GL.glActiveTexture(GL.GL_TEXTURE0)
         id_field = GL.glGenTextures(1)
-        upload = na.log10(grid["Density"].astype("float32")).copy()
+        upload = np.log10(grid["Density"].astype("float32")).copy()
         self.mi = min(upload.min(), self.mi)
         self.ma = max(upload.max(), self.ma)
         #upload = (255*(upload - -31.0) / (-25.0 - -31.0)).astype("uint8")
@@ -452,13 +452,13 @@
         GenericGLUTScene.__init__(self, 800, 800)
 
         num = len(pf.h.grids) * 6 * 4
-        self.v = na.fromiter(self._get_grid_vertices(offset),
+        self.v = np.fromiter(self._get_grid_vertices(offset),
                              dtype = 'float32', count = num * 3)
 
         self.vertices = vbo.VBO(self.v)
         self.ng = len(pf.h.grids)
-        self.position = na.zeros(3, dtype='float')
-        self.rotation = na.zeros(3, dtype='float')
+        self.position = np.zeros(3, dtype='float')
+        self.rotation = np.zeros(3, dtype='float')
         self.position[2] = -2 # Offset backwards a bit
 
         self._setup_grids()


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -29,7 +29,7 @@
 import logging, threading
 import sys
 import urllib, urllib2
-import numpy as na
+import numpy as np
 
 from yt.utilities.bottle import \
     server_names, debug, route, run, request, ServerAdapter, response
@@ -134,7 +134,7 @@
         bp['binary'] = []
         for bkey in bkeys:
             bdata = bp.pop(bkey) # Get the binary data
-            if isinstance(bdata, na.ndarray):
+            if isinstance(bdata, np.ndarray):
                 bdata = bdata.tostring()
             bpserver = BinaryDelivery(bdata, bkey)
             self.binary_payloads.append(bpserver)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -30,7 +30,7 @@
 import cStringIO
 import logging
 import uuid
-import numpy as na
+import numpy as np
 import time
 import urllib
 import urllib2


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import os
-import numpy as na
+import numpy as np
 import zipfile
 import sys
 
@@ -92,9 +92,9 @@
                                     dd*DW[0] / (64*256),
                                     dd*DW[0])
         if self.pf.field_info[self.field].take_log:
-            cmi = na.log10(cmi)
-            cma = na.log10(cma)
-            to_plot = apply_colormap(na.log10(frb[self.field]), color_bounds = (cmi, cma))
+            cmi = np.log10(cmi)
+            cma = np.log10(cma)
+            to_plot = apply_colormap(np.log10(frb[self.field]), color_bounds = (cmi, cma))
         else:
             to_plot = apply_colormap(frb[self.field], color_bounds = (cmi, cma))
         rv = write_png_to_string(to_plot)

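For fields with take_log set, the branch above has to transform the color
bounds and the pixel buffer identically before colormapping; the invariant
in isolation (apply_colormap is the yt helper used above, left commented
so the sketch stays self-contained):

    import numpy as np

    cmi, cma = 1e-30, 1e-24                      # linear color bounds
    frb = np.random.uniform(cmi, cma, (64, 64))  # stand-in pixel buffer
    cmi, cma = np.log10(cmi), np.log10(cma)      # bounds into log space...
    buf = np.log10(frb)                          # ...and the data with them
    # to_plot = apply_colormap(buf, color_bounds=(cmi, cma))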

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/pyro_queue.py
--- a/yt/gui/reason/pyro_queue.py
+++ b/yt/gui/reason/pyro_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/widget_builders.py
--- a/yt/gui/reason/widget_builders.py
+++ b/yt/gui/reason/widget_builders.py
@@ -35,7 +35,7 @@
         self._tf = tf
 
         self.center = self.pf.domain_center
-        self.normal_vector = na.array([0.7,1.0,0.3])
+        self.normal_vector = np.array([0.7,1.0,0.3])
         self.north_vector = [0.,0.,1.]
         self.steady_north = True
         self.fields = ['Density']
@@ -54,7 +54,7 @@
             roi = self.pf.h.region(self.center, self.center-self.width, self.center+self.width)
             self.mi, self.ma = roi.quantities['Extrema'](self.fields[0])[0]
             if self.log_fields[0]:
-                self.mi, self.ma = na.log10(self.mi), na.log10(self.ma)
+                self.mi, self.ma = np.log10(self.mi), np.log10(self.ma)
 
         self._tf = ColorTransferFunction((self.mi-2, self.ma+2), nbins=nbins)
 
@@ -87,10 +87,10 @@
     dd = pf.h.all_data()
     if value is None or rel_val:
         if value is None: value = 0.5
-        mi, ma = na.log10(dd.quantities["Extrema"]("Density")[0])
+        mi, ma = np.log10(dd.quantities["Extrema"]("Density")[0])
         value = 10.0**(value*(ma - mi) + mi)
     vert = dd.extract_isocontours("Density", value)
-    na.multiply(vert, 100, vert)
+    np.multiply(vert, 100, vert)
     return vert
 
 def get_streamlines(pf):

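The rel_val branch of get_isocontour above places a value in [0, 1]
log-uniformly between the field extrema; worked through once:

    import numpy as np

    mi, ma = np.log10(1e-28), np.log10(1e-22)  # e.g. Density extrema
    rel = 0.5                                  # halfway through the log range
    value = 10.0 ** (rel * (ma - mi) + mi)     # -> 1e-25, the geometric mean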

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -70,7 +70,7 @@
         if onmax: 
             center = pf.h.find_max('Density')[1]
         else:
-            center = na.array(center)
+            center = np.array(center)
         axis = inv_axis_names[axis.lower()]
         coord = center[axis]
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
@@ -203,7 +203,7 @@
     def _pf_info(self):
         tr = {}
         for k, v in self.pf._mrep._attrs.items():
-            if isinstance(v, na.ndarray):
+            if isinstance(v, np.ndarray):
                 tr[k] = v.tolist()
             else:
                 tr[k] = v
@@ -237,9 +237,9 @@
     def deliver_isocontour(self, field, value, rel_val = False):
         ph = PayloadHandler()
         vert = get_isocontour(self.pf, field, value, rel_val)
-        normals = na.empty(vert.shape)
+        normals = np.empty(vert.shape)
         for i in xrange(vert.shape[0]/3):
-            n = na.cross(vert[i*3,:], vert[i*3+1,:])
+            n = np.cross(vert[i*3,:], vert[i*3+1,:])
             normals[i*3:i*3+3,:] = n[None,:]
         ph.widget_payload(self, {'ptype':'isocontour',
                                  'binary': ['vert', 'normals'],
@@ -260,20 +260,20 @@
         # Assume that path comes in as a list of matrices
         # Assume original vector is (0., 0., 1.), up is (0., 1., 0.)
         
-        views = [na.array(view).transpose() for view in views]
+        views = [np.array(view).transpose() for view in views]
 
-        times = na.linspace(0.0,1.0,len(times))
+        times = np.linspace(0.0,1.0,len(times))
                 
         # This is wrong.
-        reflect = na.array([[1,0,0],[0,1,0],[0,0,-1]])
+        reflect = np.array([[1,0,0],[0,1,0],[0,0,-1]])
 
-        rots = na.array([R[0:3,0:3] for R in views])
+        rots = np.array([R[0:3,0:3] for R in views])
 
-        rots = na.array([na.dot(reflect,rot) for rot in rots])
+        rots = np.array([np.dot(reflect,rot) for rot in rots])
 
-        centers = na.array([na.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
+        centers = np.array([np.dot(rot,R[0:3,3]) for R,rot in zip(views,rots)])
 
-        ups = na.array([na.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
+        ups = np.array([np.dot(rot,R[0:3,1]) for R,rot in zip(views,rots)])
 
         #print 'views'
         #for view in views: print view
@@ -284,12 +284,12 @@
         #print 'ups'
         #for up in ups: print up
 
-        pos = na.empty((N,3), dtype="float64")
-        uv = na.empty((N,3), dtype="float64")
-        f = na.zeros((N,3), dtype="float64")
+        pos = np.empty((N,3), dtype="float64")
+        uv = np.empty((N,3), dtype="float64")
+        f = np.zeros((N,3), dtype="float64")
         for i in range(3):
-            pos[:,i] = create_spline(times, centers[:,i], na.linspace(0.0,1.0,N))
-            uv[:,i] = create_spline(times, ups[:,i], na.linspace(0.0,1.0,N))
+            pos[:,i] = create_spline(times, centers[:,i], np.linspace(0.0,1.0,N))
+            uv[:,i] = create_spline(times, ups[:,i], np.linspace(0.0,1.0,N))
     
         path = [pos.tolist(), f.tolist(), uv.tolist()]
     

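The path deliverer above resamples each camera coordinate independently
onto N evenly spaced times with create_spline.  Schematically, with
np.interp standing in for create_spline (keyframe values are made up):

    import numpy as np

    N = 8
    times = np.linspace(0.0, 1.0, 3)       # keyframe times
    centers = np.array([[0., 0., 2.],      # keyframe camera centers
                        [1., 0., 2.],
                        [1., 1., 2.]])
    pos = np.empty((N, 3), dtype='float64')
    for i in range(3):
        pos[:, i] = np.interp(np.linspace(0.0, 1.0, N), times, centers[:, i])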

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -33,6 +33,7 @@
 # First module imports
 import sys, types, os, glob, cPickle, time
 import numpy as na # For historical reasons
+import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
 # This next item will handle most of the actual startup procedures, but it will
@@ -52,7 +53,7 @@
 if __level >= int(ytcfgDefaults["loglevel"]):
     # This won't get displayed.
     mylog.debug("Turning off NumPy error reporting")
-    na.seterr(all = 'ignore')
+    np.seterr(all = 'ignore')
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \

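With both imports in place, yt/mods.py exposes the same module under both
names for the duration of the migration, so downstream scripts can switch
from na to np at their own pace:

    import numpy as na   # historical alias, kept for compatibility
    import numpy as np   # preferred spelling going forward
    assert na is np      # one module, two names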

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,7 +25,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.visualization.image_writer import write_image, write_bitmap
@@ -61,7 +61,7 @@
 def _rchild_id(id): return (id<<1) + 2
 def _parent_id(id): return (id-1)>>1
 
-steps = na.array([[-1, -1, -1],
+steps = np.array([[-1, -1, -1],
                   [-1, -1,  0],
                   [-1, -1,  1],
                   [-1,  0, -1],
@@ -319,31 +319,31 @@
         if l_max is None:
             self.l_max = self.pf.hierarchy.max_level+1
         else:
-            self.l_max = na.min([l_max,self.pf.hierarchy.max_level+1])
+            self.l_max = np.min([l_max,self.pf.hierarchy.max_level+1])
 
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.array(le)
+            self.domain_left_edge = np.array(le)
 
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.array(re)
+            self.domain_right_edge = np.array(re)
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
 
         levels = pf.hierarchy.get_levels()
         root_grids = levels.next()
         covering_grids = root_grids
-        vol_needed = na.prod(self.domain_right_edge-self.domain_left_edge)
+        vol_needed = np.prod(self.domain_right_edge-self.domain_left_edge)
 
         for i in range(self.pf.hierarchy.max_level):
-            root_l_data = na.clip(na.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
-            root_r_data = na.clip(na.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_l_data = np.clip(np.array([grid.LeftEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
+            root_r_data = np.clip(np.array([grid.RightEdge for grid in root_grids]),self.domain_left_edge, self.domain_right_edge)
             
-            vol = na.prod(root_r_data-root_l_data,axis=1).sum()
+            vol = np.prod(root_r_data-root_l_data,axis=1).sum()
             if vol >= vol_needed:
                 covering_grids = root_grids
                 root_grids = levels.next()
@@ -356,18 +356,18 @@
         self.domain_left_edge = ((self.domain_left_edge)/rgdds).astype('int64')*rgdds
         self.domain_right_edge = (((self.domain_right_edge)/rgdds).astype('int64')+1)*rgdds
 
-        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
-        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_left_edge = np.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = np.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
         
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         #mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
         
-        root_l_data = na.array([grid.LeftEdge for grid in root_grids])
-        root_r_data = na.array([grid.RightEdge for grid in root_grids])
-        root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\
-                       na.all(root_r_data > self.my_l_corner,axis=1)
+        root_l_data = np.array([grid.LeftEdge for grid in root_grids])
+        root_r_data = np.array([grid.RightEdge for grid in root_grids])
+        root_we_want = np.all(root_l_data < self.my_r_corner,axis=1)*\
+                       np.all(root_r_data > self.my_l_corner,axis=1)
         
         root_grids = root_grids[root_we_want]
 
@@ -550,7 +550,7 @@
         center cell (i,j,k) is omitted.
         
         """
-        position = na.array(position)
+        position = np.array(position)
         grid = self.locate_brick(position).grid
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
@@ -583,20 +583,20 @@
         center cell (i,j,k) is omitted.
         
         """
-        ci = na.array(ci)
+        ci = np.array(ci)
         center_dds = grid.dds
-        position = grid.LeftEdge + (na.array(ci)+0.5)*grid.dds
-        grids = na.empty(26, dtype='object')
-        cis = na.empty([26,3], dtype='int64')
+        position = grid.LeftEdge + (np.array(ci)+0.5)*grid.dds
+        grids = np.empty(26, dtype='object')
+        cis = np.empty([26,3], dtype='int64')
         offs = 0.5*(center_dds + self.sdx)
 
         new_cis = ci + steps
-        in_grid = na.all((new_cis >=0)*
+        in_grid = np.all((new_cis >=0)*
                          (new_cis < grid.ActiveDimensions),axis=1)
         new_positions = position + steps*offs
         grids[in_grid] = grid
                 
-        get_them = na.argwhere(in_grid != True).ravel()
+        get_them = np.argwhere(in_grid != True).ravel()
         cis[in_grid] = new_cis[in_grid]
 
         if (in_grid != True).sum()>0:
@@ -668,7 +668,7 @@
                     dds = []
                     for i,field in enumerate(self.fields):
                         vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                        if self.log_fields[i]: vcd = na.log10(vcd)
+                        if self.log_fields[i]: vcd = np.log10(vcd)
                         dds.append(vcd)
                     current_saved_grids.append(current_node.grid)
                     current_vcds.append(dds)
@@ -677,7 +677,7 @@
                           current_node.li[1]:current_node.ri[1]+1,
                           current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
                 
-                if na.any(current_node.r_corner-current_node.l_corner == 0):
+                if np.any(current_node.r_corner-current_node.l_corner == 0):
                     current_node.brick = None
                 else:
                     current_node.brick = PartitionedGrid(current_node.grid.id, data,
@@ -686,8 +686,8 @@
                                                          current_node.dims.astype('int64'))
                 self.bricks.append(current_node.brick)
                 self.brick_dimensions.append(current_node.dims)
-        self.bricks = na.array(self.bricks)
-        self.brick_dimensions = na.array(self.brick_dimensions)
+        self.bricks = np.array(self.bricks)
+        self.brick_dimensions = np.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
 
@@ -701,7 +701,7 @@
             dds = []
             for i,field in enumerate(self.fields):
                 vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = na.log10(vcd)
+                if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(current_node.grid)
                 self.current_vcds.append(dds)
@@ -734,14 +734,14 @@
         dds = thisnode.grid.dds
         gle = thisnode.grid.LeftEdge
         gre = thisnode.grid.RightEdge
-        thisnode.li = na.rint((thisnode.l_corner-gle)/dds).astype('int32')
-        thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
+        thisnode.li = np.rint((thisnode.l_corner-gle)/dds).astype('int32')
+        thisnode.ri = np.rint((thisnode.r_corner-gle)/dds).astype('int32')
         thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
         # Here the cost is actually inversely proportional to 4**Level (empirical)
-        #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+        #thisnode.cost = (np.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
         thisnode.cost = 1.0
         # Here is the old way
-        # thisnode.cost = na.prod(thisnode.dims).astype('int64')
+        # thisnode.cost = np.prod(thisnode.dims).astype('int64')
 
     def initialize_leafs(self):
         for node in self.depth_traverse():
@@ -754,7 +754,7 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(self.comm.size))
+        par_tree_depth = long(np.log2(self.comm.size))
         for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
                 # There are self.comm.size nodes that meet this criteria
@@ -767,7 +767,7 @@
                 del node.grids
             except:
                 pass
-            if not na.isreal(node.grid):
+            if not np.isreal(node.grid):
                 node.grid = node.grid.id
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
@@ -942,7 +942,7 @@
         v = 0.0
         for node in self.depth_traverse():
             if node.grid is not None:
-                v += na.prod(node.r_corner - node.l_corner)
+                v += np.prod(node.r_corner - node.l_corner)
         return v
 
     def count_cells(self):
@@ -957,10 +957,10 @@
         Total volume of the tree.
         
         """
-        c = na.int64(0)
+        c = np.int64(0)
         for node in self.depth_traverse():
             if node.grid is not None:
-                c += na.prod(node.ri - node.li).astype('int64')
+                c += np.prod(node.ri - node.li).astype('int64')
         return c
 
     def _build(self, grids, parent, l_corner, r_corner):
@@ -994,12 +994,12 @@
         current_node.r_corner = r_corner
         # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(self.comm.size))
+        par_tree_depth = int(np.log2(self.comm.size))
         anprocs = 2**par_tree_depth
 
         volume_partitioned = 0.0
         pbar = get_pbar("Building kd-Tree",
-                na.prod(self.domain_right_edge-self.domain_left_edge))
+                np.prod(self.domain_right_edge-self.domain_left_edge))
 
         while current_node is not None:
             pbar.update(volume_partitioned)
@@ -1034,12 +1034,12 @@
                     if len(thisgrid.Children) > 0 and thisgrid.Level < self.l_max:
                         # Get the children that are actually in the current volume
                         children = [child.id - self._id_offset for child in thisgrid.Children  
-                                    if na.all(child.LeftEdge < current_node.r_corner) & 
-                                    na.all(child.RightEdge > current_node.l_corner)]
+                                    if np.all(child.LeftEdge < current_node.r_corner) & 
+                                    np.all(child.RightEdge > current_node.l_corner)]
 
                         # If we have children, get all the new grids, and keep building the tree
                         if len(children) > 0:
-                            current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
+                            current_node.grids = self.pf.hierarchy.grids[np.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
                             #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
@@ -1048,7 +1048,7 @@
                     # Else make a leaf node (brick container)
                     #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    volume_partitioned += na.prod(current_node.r_corner-current_node.l_corner)
+                    volume_partitioned += np.prod(current_node.r_corner-current_node.l_corner)
                     # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
@@ -1078,7 +1078,7 @@
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1089,7 +1089,7 @@
         left and right children.
         '''
 
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        data = np.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
 
@@ -1106,8 +1106,8 @@
         current_node.split_pos = split
         #less_ids0 = (data[:,0] < split)
         #greater_ids0 = (split < data[:,1])
-        #assert(na.all(less_ids0 == less_ids))
-        #assert(na.all(greater_ids0 == greater_ids))
+        #assert(np.all(less_ids0 == less_ids))
+        #assert(np.all(greater_ids0 == greater_ids))
 
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
@@ -1143,7 +1143,7 @@
             Position of the back center from which to start moving forward.
         front_center: array_like
             Position of the front center to which the traversal progresses.
-        image: na.array
+        image: np.array
             Image plane to contain resulting ray cast.
 
         Returns
@@ -1176,12 +1176,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(self.comm.size))
+        rounds = int(np.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+self.comm.rank)
+        path = np.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1215,7 +1215,7 @@
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    ta = 1.0 - na.sum(self.image,axis=2)
+                    ta = 1.0 - np.sum(self.image,axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1237,8 +1237,8 @@
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
-                    #ta = na.exp(-na.sum(arr2,axis=2))
-                    ta = 1.0 - na.sum(arr2, axis=2)
+                    #ta = np.exp(-np.sum(arr2,axis=2))
+                    ta = 1.0 - np.sum(arr2, axis=2)
                     ta[ta<0.0] = 0.0
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1292,8 +1292,8 @@
                     self.bricks.append(node.brick)
                     self.brick_dimensions.append(node.dims)
 
-            self.bricks = na.array(self.bricks)
-            self.brick_dimensions = na.array(self.brick_dimensions)
+            self.bricks = np.array(self.bricks)
+            self.brick_dimensions = np.array(self.brick_dimensions)
 
             self.bricks_loaded=True
             f.close()
@@ -1333,12 +1333,12 @@
         raise NotImplementedError()
         f = h5py.File(fn,"w")
         Nkd = len(self.tree)
-        kd_l_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_r_corners = na.zeros( (Nkd, 3), dtype='float64')
-        kd_grids = na.zeros( (Nkd) )
-        kd_split_axs = na.zeros( (Nkd), dtype='int32')
-        kd_split_pos = na.zeros( (Nkd), dtype='float64')
-        kd_owners = na.zeros( (Nkd), dtype='int32')
+        kd_l_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_r_corners = np.zeros( (Nkd, 3), dtype='float64')
+        kd_grids = np.zeros( (Nkd) )
+        kd_split_axs = np.zeros( (Nkd), dtype='int32')
+        kd_split_pos = np.zeros( (Nkd), dtype='float64')
+        kd_owners = np.zeros( (Nkd), dtype='int32')
         f.create_group("/bricks")
         for i, tree_item in enumerate(self.tree.iteritems()):
             kdid = tree_item[0]
@@ -1369,17 +1369,17 @@
         f.close()
         
     def corners_to_line(self,lc, rc):
-        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+        x = np.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
                        rc[0], rc[0], rc[0], rc[0], rc[0],
                        rc[0], lc[0], lc[0], rc[0],
                        rc[0], lc[0], lc[0] ])
         
-        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+        y = np.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1], lc[1],
                        lc[1], lc[1], rc[1], rc[1],
                        rc[1], rc[1], lc[1] ])
         
-        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+        z = np.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
                        lc[2], rc[2], rc[2], lc[2], lc[2],
                        rc[2], rc[2], rc[2], rc[2],
                        lc[2], lc[2], lc[2] ])

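A note on the compositing in reduce_tree_images above: each receive
blends two partial renderings front-over-back, with the per-pixel
channel sum standing in for opacity. A minimal standalone sketch of
that step (function and argument names are illustrative, not yt API):

    import numpy as np

    def composite_front_over_back(im_front, im_back):
        # Transmission of the front layer: 1 minus its channel sum,
        # clipped at zero so fully opaque pixels block the back layer.
        ta = 1.0 - np.sum(im_front, axis=2)
        ta[ta < 0.0] = 0.0
        out = im_front.copy()
        for i in range(3):
            out[:, :, i] += ta * im_back[:, :, i]
        return out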

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -99,11 +99,11 @@
     field = None
 
     def run(self):
-        na.random.seed(4333)
-        start_point = na.random.random(self.pf.dimensionality) * \
+        np.random.seed(4333)
+        start_point = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
-        end_point   = na.random.random(self.pf.dimensionality) * \
+        end_point   = np.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -55,10 +55,10 @@
 
 class ArrayDelta(ValueDelta):
     def __repr__(self):
-        nabove = len(na.where(self.delta > self.acceptable)[0])
+        nabove = len(np.where(self.delta > self.acceptable)[0])
         return "ArrayDelta: Delta max of %s, acceptable of %s.\n" \
                "%d of %d points above the acceptable limit" % \
-               (na.nanmax(self.delta), self.acceptable, nabove,
+               (np.nanmax(self.delta), self.acceptable, nabove,
                 self.delta.size)
 
 class ShapeMismatch(RegressionTestException):
@@ -122,8 +122,8 @@
         """
         if a1.shape != a2.shape:
             raise ShapeMismatch(a1, a2)
-        delta = na.abs(a1 - a2).astype("float64")/(a1 + a2)
-        if na.nanmax(delta) > acceptable:
+        delta = np.abs(a1 - a2).astype("float64")/(a1 + a2)
+        if np.nanmax(delta) > acceptable:
             raise ArrayDelta(delta, acceptable)
         return True
 
@@ -134,7 +134,7 @@
         difference is greater than `acceptable` it is considered a failure and
         an appropriate exception is raised.
         """
-        delta = na.abs(v1 - v2)/(v1 + v2)
+        delta = np.abs(v1 - v2)/(v1 + v2)
         if delta > acceptable:
             raise ValueDelta(delta, acceptable)
         return True

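The array comparison above boils down to a symmetric relative
difference with a NaN-tolerant reduction; a self-contained sketch of
the same test (names are illustrative):

    import numpy as np

    def arrays_agree(a1, a2, acceptable):
        # Symmetric relative difference; 0/0 cells become NaN and are
        # ignored by nanmax, as in compare_array_delta above.
        if a1.shape != a2.shape:
            raise ValueError("shape mismatch")
        delta = np.abs(a1 - a2).astype("float64") / (a1 + a2)
        return np.nanmax(delta) <= acceptable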

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -32,13 +32,13 @@
         # Tests to make sure particle positions aren't changing
         # drastically. This is very unlikely to be a problem.
         all = self.pf.h.all_data()
-        min = na.empty(3,dtype='float64')
+        min = np.empty(3,dtype='float64')
         max = min.copy()
         dims = ["particle_position_x","particle_position_y",
             "particle_position_z"]
         for i in xrange(3):
-            min[i] = na.min(all[dims[i]])
-            max[i] = na.max(all[dims[i]])
+            min[i] = np.min(all[dims[i]])
+            max[i] = np.max(all[dims[i]])
         self.result = (min,max)
     
     def compare(self, old_result):


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -152,7 +152,7 @@
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
-                   dest="unit", default='unitary',
+                   dest="unit", default='1',
                    help="Desired units"),
     center  = dict(short="-c", long="--center",
                    action="store", type=float,
@@ -1212,7 +1212,7 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
         if args.axis == 4:
             axes = range(3)
         else:
@@ -1266,12 +1266,12 @@
             v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
-        center = na.array(center)
+        center = np.array(center)
 
         L = args.viewpoint
         if L is None:
             L = [1.]*3
-        L = na.array(args.viewpoint)
+        L = np.array(args.viewpoint)
 
         unit = args.unit
         if unit is None:
@@ -1302,7 +1302,7 @@
             roi = pf.h.region(center, center-width, center+width)
             mi, ma = roi.quantities['Extrema'](field)[0]
             if log:
-                mi, ma = na.log10(mi), na.log10(ma)
+                mi, ma = np.log10(mi), np.log10(ma)
         else:
             mi, ma = myrange[0], myrange[1]
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 c_kms = 2.99792458e5 # c in km/s
 G = 6.67259e-8 # cgs
@@ -49,40 +49,40 @@
 
     def ComovingTransverseDistance(self,z_i,z_f):
          if (self.OmegaCurvatureNow > 0):
-             return (self.HubbleDistance() / na.sqrt(self.OmegaCurvatureNow) * 
-                     na.sinh(na.sqrt(self.OmegaCurvatureNow) * 
+             return (self.HubbleDistance() / np.sqrt(self.OmegaCurvatureNow) * 
+                     np.sinh(np.sqrt(self.OmegaCurvatureNow) * 
                           self.ComovingRadialDistance(z_i,z_f) / 
                           self.HubbleDistance()))
          elif (self.OmegaCurvatureNow < 0):
-             return (self.HubbleDistance() / na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
-                     sin(na.sqrt(na.fabs(self.OmegaCurvatureNow)) * 
+             return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) * 
+                     np.sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) *
                          self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
          else:
              return self.ComovingRadialDistance(z_i,z_f)
 
     def ComovingVolume(self,z_i,z_f):
         if (self.OmegaCurvatureNow > 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / self.OmegaCurvatureNow * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      ana.sinh(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsinh(np.fabs(self.OmegaCurvatureNow) *
                             self.ComovingTransverseDistance(z_i,z_f) / 
-                            self.HubbleDistance()) / na.sqrt(self.OmegaCurvatureNow)) / 1e9)
+                            self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
         elif (self.OmegaCurvatureNow < 0):
-             return (2 * na.pi * na.power(self.HubbleDistance(), 3) / 
-                     na.fabs(self.OmegaCurvatureNow) * 
+             return (2 * np.pi * np.power(self.HubbleDistance(), 3) / 
+                     np.fabs(self.OmegaCurvatureNow) * 
                      (self.ComovingTransverseDistance(z_i,z_f) / self.HubbleDistance() * 
-                      na.sqrt(1 + self.OmegaCurvatureNow * 
+                      np.sqrt(1 + self.OmegaCurvatureNow * 
                            sqr(self.ComovingTransverseDistance(z_i,z_f) / 
                                self.HubbleDistance())) - 
-                      asin(na.fabs(self.OmegaCurvatureNow) * 
+                      np.arcsin(np.fabs(self.OmegaCurvatureNow) *
                            self.ComovingTransverseDistance(z_i,z_f) / 
                            self.HubbleDistance()) / 
-                      na.sqrt(na.fabs(self.OmegaCurvatureNow))) / 1e9)
+                      np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
         else:
-             return (4 * na.pi * na.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
+             return (4 * np.pi * np.power(self.ComovingTransverseDistance(z_i,z_f), 3) / 
                      3 / 1e9)
 
     def AngularDiameterDistance(self,z_i,z_f):
@@ -100,18 +100,18 @@
         return (romberg(self.AgeIntegrand,z,1000) / self.HubbleConstantNow * kmPerMpc)
 
     def AngularScale_1arcsec_kpc(self,z_i,z_f):
-        return (self.AngularDiameterDistance(z_i,z_f) / 648. * na.pi)
+        return (self.AngularDiameterDistance(z_i,z_f) / 648. * np.pi)
 
     def CriticalDensity(self,z):
-        return (3.0 / 8.0 / na.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
+        return (3.0 / 8.0 / np.pi * sqr(self.HubbleConstantNow / kmPerMpc) / G *
                 (self.OmegaLambdaNow + ((1 + z)**3.0) * self.OmegaMatterNow))
 
     def AgeIntegrand(self,z):
         return (1 / (z + 1) / self.ExpansionFactor(z))
 
     def ExpansionFactor(self,z):
-        return na.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
-                    self.OmegaCurvatureNow * na.sqrt(1 + z) + 
+        return np.sqrt(self.OmegaMatterNow * ((1 + z)**3.0) + 
+                    self.OmegaCurvatureNow * np.sqrt(1 + z) + 
                     self.OmegaLambdaNow)
 
     def InverseExpansionFactor(self,z):
@@ -162,8 +162,8 @@
         """
         # Changed 2.52e17 to 2.52e19 because H_0 is in km/s/Mpc, 
         # instead of 100 km/s/Mpc.
-        return 2.52e19 / na.sqrt(self.OmegaMatterNow) / \
-            self.HubbleConstantNow / na.power(1 + self.InitialRedshift,1.5)
+        return 2.52e19 / np.sqrt(self.OmegaMatterNow) / \
+            self.HubbleConstantNow / np.power(1 + self.InitialRedshift,1.5)
 
     def ComputeRedshiftFromTime(self,time):
         """
@@ -183,18 +183,18 @@
  
         # 1) For a flat universe with OmegaMatterNow = 1, it's easy.
  
-        if ((na.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
+        if ((np.fabs(self.OmegaMatterNow-1) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            a = na.power(time/self.InitialTime,2.0/3.0)
+            a = np.power(time/self.InitialTime,2.0/3.0)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
         #    Actually, this is a little tricky since we must solve an equation
-        #    of the form eta - na.sinh(eta) + x = 0..
+        #    of the form eta - np.sinh(eta) + x = 0..
  
         if ((self.OmegaMatterNow < 1) and 
             (self.OmegaLambdaNow < OMEGA_TOLERANCE)):
-            x = 2*TimeHubble0*na.power(1.0 - self.OmegaMatterNow, 1.5) / \
+            x = 2*TimeHubble0*np.power(1.0 - self.OmegaMatterNow, 1.5) / \
                 self.OmegaMatterNow;
  
             # Compute eta in a three step process, first from a third-order
@@ -203,12 +203,12 @@
             # eta.  This works well because parts 1 & 2 are an excellent approximation
             # when x is small and part 3 converges quickly when x is large. 
  
-            eta = na.power(6*x,1.0/3.0)                # part 1
-            eta = na.power(120*x/(20+eta*eta),1.0/3.0) # part 2
+            eta = np.power(6*x,1.0/3.0)                # part 1
+            eta = np.power(120*x/(20+eta*eta),1.0/3.0) # part 2
             for i in range(40):                      # part 3
                 eta_old = eta
-                eta = na.arcsinh(eta + x)
-                if (na.fabs(eta-eta_old) < ETA_TOLERANCE): 
+                eta = np.arcsinh(eta + x)
+                if (np.fabs(eta-eta_old) < ETA_TOLERANCE): 
                     break
                 if (i == 39):
                     print "No convergence after %d iterations." % i
@@ -216,7 +216,7 @@
             # Now use eta to compute the expansion factor (eq. 13-10, part 2).
  
             a = self.OmegaMatterNow/(2.0*(1.0 - self.OmegaMatterNow))*\
-                (na.cosh(eta) - 1.0)
+                (np.cosh(eta) - 1.0)
 
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
         #    Easy, but skip it for now.
@@ -228,10 +228,10 @@
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
+        if ((np.fabs(OmegaCurvatureNow) < OMEGA_TOLERANCE) and
             (self.OmegaLambdaNow > OMEGA_TOLERANCE)):
-            a = na.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
-                na.power(na.sinh(1.5 * na.sqrt(1.0 - self.OmegaMatterNow)*\
+            a = np.power(self.OmegaMatterNow / (1 - self.OmegaMatterNow),1.0/3.0) * \
+                np.power(np.sinh(1.5 * np.sqrt(1.0 - self.OmegaMatterNow)*\
                                      TimeHubble0),2.0/3.0)
 
 
@@ -249,29 +249,29 @@
         # 1) For a flat universe with OmegaMatterNow = 1, things are easy.
  
         if ((self.OmegaMatterNow == 1.0) and (self.OmegaLambdaNow == 0.0)):
-            TimeHubble0 = 2.0/3.0/na.power(1+z,1.5)
+            TimeHubble0 = 2.0/3.0/np.power(1+z,1.5)
  
         # 2) For OmegaMatterNow < 1 and OmegaLambdaNow == 0 see
         #    Peebles 1993, eq. 13-3, 13-10.
  
         if ((self.OmegaMatterNow < 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (na.sinh(eta) - eta)
+            eta = np.arccosh(1 + 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (np.sinh(eta) - eta)
  
         # 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
  
         if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
-            eta = na.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
-            TimeHubble0 = self.OmegaMatterNow/(2*na.power(1.0-self.OmegaMatterNow, 1.5))*\
-                (eta - na.sin(eta))
+            eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+            TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
+                (eta - np.sin(eta))
  
         # 4) For flat universe, with non-zero OmegaLambdaNow, see eq. 13-20.
  
-        if ((na.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
-            TimeHubble0 = 2.0/3.0/na.sqrt(1-self.OmegaMatterNow)*\
-                na.arcsinh(na.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
-                               na.power(1+z,1.5))
+        if ((np.fabs(OmegaCurvatureNow) < 1.0e-3) and (self.OmegaLambdaNow != 0)):
+            TimeHubble0 = 2.0/3.0/np.sqrt(1-self.OmegaMatterNow)*\
+                np.arcsinh(np.sqrt((1-self.OmegaMatterNow)/self.OmegaMatterNow)/ \
+                               np.power(1+z,1.5))
   
         # Now convert from Time * H0 to time.
   

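For a concrete instance of branch 1 of ComputeTimeFromRedshift above
(flat universe, OmegaMatterNow = 1): TimeHubble0 = (2/3)/(1+z)^1.5,
then divide by H0 to get seconds. A sketch assuming the standard value
for the module's kmPerMpc constant:

    import numpy as np

    km_per_mpc = 3.08567758e19  # assumed value of the module's kmPerMpc

    def eds_time_from_redshift(z, hubble_constant=71.0):
        # H0 is in km/s/Mpc, so hubble_constant/km_per_mpc is in 1/s.
        time_hubble0 = 2.0 / 3.0 / np.power(1.0 + z, 1.5)
        return time_hubble0 * km_per_mpc / hubble_constant  # seconds

    # eds_time_from_redshift(0.0) is ~2.9e17 s, about 9.2 Gyr.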

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/decompose.py
--- /dev/null
+++ b/yt/utilities/decompose.py
@@ -0,0 +1,156 @@
+"""
+Automagical Cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Author: Artur Gawryszczak <gawrysz at gmail.com>
+Affiliation: PCSS
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+SIEVE_PRIMES = \
+    lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]])
+
+
+def decompose_to_primes(max_prime):
+    """ Decompose number into the primes """
+    for prime in SIEVE_PRIMES(range(2, max_prime)):
+        if prime * prime > max_prime:
+            break
+        while max_prime % prime == 0:
+            yield prime
+            max_prime /= prime
+    if max_prime > 1:
+        yield max_prime
+
+
+def decompose_array(arr, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = arr.shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    patches = split_array(arr, psize)
+    return grid_left_edges, grid_right_edges, patches
+
+
+def evaluate_domain_decomposition(n_d, pieces, ldom):
+    """ Evaluate longest to shortest edge ratio
+        BEWARE: lots of magic here """
+    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
+    bsize = int(np.sum(
+        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    load_balance = float(np.product(n_d)) / \
+        (float(pieces) * np.product((n_d - 1) / ldom + 1))
+
+    # 0.25 is magic number
+    # 0.25 is a magic number
+    # \todo add a factor that estimates lower cost when x-direction is
+    # not chopped too much
+    # \deprecated estimate these magic numbers
+    quality *= (1. - (0.001 * ldom[0] + 0.0001 * ldom[1]) / pieces)
+    if np.any(ldom > n_d):
+        quality = 0
+
+    return quality
+
+
+def factorize_number(pieces):
+    """ Return array consisting of prime, its power and number of different
+        decompositions in three dimensions for this prime
+    """
+    factors = [factor for factor in decompose_to_primes(pieces)]
+    temp = np.bincount(factors)
+    return np.array(
+        [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
+         for prime in np.unique(factors)]
+    )
+
+
+def get_psize(n_d, pieces):
+    """ Calculate the best division of array into px*py*pz subarrays.
+        The goal is to minimize the ratio of longest to shortest edge,
+        which in turn minimizes the amount of inter-process communication.
+    """
+    fac = factorize_number(pieces)
+    nfactors = len(fac[:, 2])
+    best = 0.0
+    while np.all(fac[:, 2] > 0):
+        ldom = np.ones(3, dtype=np.int)
+        for nfac in range(nfactors):
+            i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
+            k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
+            i = fac[nfac, 1] - i
+            j = fac[nfac, 1] - (i + k)
+            ldom *= fac[nfac, 0] ** np.array([i, j, k])
+
+        quality = evaluate_domain_decomposition(n_d, pieces, ldom)
+        if quality > best:
+            best = quality
+            p_size = ldom
+        # search for next unique combination
+        for j in range(nfactors):
+            if fac[j, 2] > 1:
+                fac[j, 2] -= 1
+                break
+            else:
+                if (j < nfactors - 1):
+                    fac[j, 2] = int((fac[j, 1] + 1) * (fac[j, 1] + 2) / 2)
+                else:
+                    fac[:, 2] = 0  # no more combinations to try
+
+    return p_size
+
+
+def split_array(tab, psize):
+    """ Split array into px*py*pz subarrays using internal numpy routine. """
+    temp = [np.array_split(array, psize[1], axis=1)
+            for array in np.array_split(tab, psize[2], axis=2)]
+    temp = [item for sublist in temp for item in sublist]
+    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
+    temp = [item for sublist in temp for item in sublist]
+    return temp
+
+
+if __name__ == "__main__":
+
+    NPROC = 12
+    ARRAY = np.zeros((128, 128, 129))
+    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
+    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
+
+    for idx in range(NPROC):
+        print LE[idx, :], RE[idx, :], DATA[idx].shape

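A quick sanity check of the prime decomposition helper above (assuming
the module imports as yt.utilities.decompose, matching the new file's
path):

    from yt.utilities.decompose import decompose_to_primes

    # Factors are yielded smallest-first with multiplicity; primes
    # pass through unchanged.
    assert list(decompose_to_primes(12)) == [2, 2, 3]
    assert list(decompose_to_primes(17)) == [17]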

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -1,6 +1,6 @@
 import os
 import weakref
-import numpy as na
+import numpy as np
 import h5py as h5
 from conversion_abc import *
 from glob import glob
@@ -55,11 +55,11 @@
             grid['domain'] = int(splitup[8].rstrip(','))
             self.current_time = grid['time']
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -94,12 +94,12 @@
         proc_names = glob(self.source_dir+'id*')
         #print 'Reading a dataset from %i Processor Files' % len(proc_names)
         N = len(proc_names)
-        grid_dims = na.empty([N,3],dtype='int64')
-        grid_left_edges = na.empty([N,3],dtype='float64')
-        grid_dds = na.empty([N,3],dtype='float64')
-        grid_levels = na.zeros(N,dtype='int64')
-        grid_parent_ids = -1*na.ones(N,dtype='int64')
-        grid_particle_counts = na.zeros([N,1],dtype='int64')
+        grid_dims = np.empty([N,3],dtype='int64')
+        grid_left_edges = np.empty([N,3],dtype='float64')
+        grid_dds = np.empty([N,3],dtype='float64')
+        grid_levels = np.zeros(N,dtype='int64')
+        grid_parent_ids = -1*np.ones(N,dtype='int64')
+        grid_particle_counts = np.zeros([N,1],dtype='int64')
 
         for i in range(N):
             if i == 0:
@@ -128,12 +128,12 @@
 
             if len(line) == 0: break
             
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
                 grid['dimensions'][grid['dimensions']==0]=1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             # Append all hierarchy info before reading this grid's data
@@ -149,7 +149,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -159,8 +159,8 @@
 
         gles = grid_left_edges
         gdims = grid_dims
-        dle = na.min(gles,axis=0)
-        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        dle = np.min(gles,axis=0)
+        dre = np.max(gles+grid_dims*grid_dds,axis=0)
         glis = ((gles - dle)/grid_dds).astype('int64')
         gris = glis + gdims
 
@@ -183,17 +183,17 @@
 
         ## --------- Done with top level nodes --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = ddims
         pars_g.attrs['current_time'] = self.current_time
         pars_g.attrs['domain_left_edge'] = dle
         pars_g.attrs['domain_right_edge'] = dre
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(1)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(1)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         # pars_g.attrs['n_cells'] = grid['ncells']
@@ -224,18 +224,18 @@
                 splitup = line.strip().split()
 
                 if "DIMENSIONS" in splitup:
-                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    grid_dims = np.array(splitup[-3:]).astype('int')
                     line = f.readline()
                     continue
                 elif "CELL_DATA" in splitup:
                     grid_ncells = int(splitup[-1])
                     line = f.readline()
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         grid_dims -= 1
                         grid_dims[grid_dims==0]=1
-                    if na.prod(grid_dims) != grid_ncells:
+                    if np.prod(grid_dims) != grid_ncells:
                         print 'product of dimensions %i not equal to number of cells %i' % \
-                              (na.prod(grid_dims), grid_ncells)
+                              (np.prod(grid_dims), grid_ncells)
                         raise TypeError
                     break
                 else:
@@ -250,7 +250,7 @@
                     if not read_table:
                         line = f.readline() # Read the lookup table line
                         read_table = True
-                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
                     if i == 0:
                         self.fields.append(field)
                     # print 'writing field %s' % field
@@ -259,7 +259,7 @@
 
                 elif 'VECTORS' in splitup:
                     field = splitup[1]
-                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
                     data_x = data[0::3].reshape(grid_dims,order='F')
                     data_y = data[1::3].reshape(grid_dims,order='F')
                     data_z = data[2::3].reshape(grid_dims,order='F')
@@ -291,7 +291,7 @@
             if name in self.field_conversions.keys():
                 this_field.attrs['field_to_cgs'] = self.field_conversions[name]
             else:
-                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+                this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
             
 
     def convert(self, hierarchy=True, data=True):
@@ -327,11 +327,11 @@
         elif "Really" in splitup:
             grid['time'] = splitup[-1]
         elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
         elif "ORIGIN" in splitup:
-            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
         elif "SPACING" in splitup:
-            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+            grid['dds'] = np.array(splitup[-3:]).astype('float64')
         elif "CELL_DATA" in splitup:
             grid["ncells"] = int(splitup[-1])
         elif "SCALARS" in splitup:
@@ -365,19 +365,19 @@
             #    print line
 
             if len(line) == 0: break
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 grid['dimensions'] -= 1
-            if na.prod(grid['dimensions']) != grid['ncells']:
+            if np.prod(grid['dimensions']) != grid['ncells']:
                 print 'product of dimensions %i not equal to number of cells %i' % \
-                      (na.prod(grid['dimensions']), grid['ncells'])
+                      (np.prod(grid['dimensions']), grid['ncells'])
                 raise TypeError
 
             if grid['read_type'] == 'scalar':
                 grid[grid['read_field']] = \
-                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                    np.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
                 self.fields.append(grid['read_field'])
             elif grid['read_type'] == 'vector':
-                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                data = np.fromfile(f, dtype='>f4', count=3*grid['ncells'])
                 grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
                 grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
@@ -398,7 +398,7 @@
 
         ## --------- Begin level nodes --------- ##
         g = f.create_group('gridded_data_format')
-        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['format_version']=np.float32(1.0)
         g.attrs['data_software']='athena'
         data_g = f.create_group('data')
         field_g = f.create_group('field_types')
@@ -406,8 +406,8 @@
         pars_g = f.create_group('simulation_parameters')
 
         dle = grid['left_edge'] # True only in this case of one grid for the domain
-        gles = na.array([grid['left_edge']])
-        gdims = na.array([grid['dimensions']])
+        gles = np.array([grid['left_edge']])
+        gdims = np.array([grid['dimensions']])
         glis = ((gles - dle)/grid['dds']).astype('int64')
         gris = glis + gdims
 
@@ -416,18 +416,18 @@
         # grid_dimensions
         gdim = f.create_dataset('grid_dimensions',data=gdims)
 
-        levels = na.array([0]).astype('int64') # unigrid example
+        levels = np.array([0]).astype('int64') # unigrid example
         # grid_level
         level = f.create_dataset('grid_level',data=levels)
 
         ## ----------QUESTIONABLE NEXT LINE--------- ##
         # This data needs two dimensions for now. 
-        n_particles = na.array([[0]]).astype('int64')
+        n_particles = np.array([[0]]).astype('int64')
         #grid_particle_count
         part_count = f.create_dataset('grid_particle_count',data=n_particles)
 
         # Assume -1 means no parent.
-        parent_ids = na.array([-1]).astype('int64')
+        parent_ids = np.array([-1]).astype('int64')
         # grid_parent_id
         pids = f.create_dataset('grid_parent_id',data=parent_ids)
 
@@ -451,8 +451,8 @@
 
         ## --------- Attribute Tables --------- ##
 
-        pars_g.attrs['refine_by'] = na.int64(1)
-        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['refine_by'] = np.int64(1)
+        pars_g.attrs['dimensionality'] = np.int64(3)
         pars_g.attrs['domain_dimensions'] = grid['dimensions']
         try:
             pars_g.attrs['current_time'] = grid['time']
@@ -461,10 +461,10 @@
         pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
         pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
         pars_g.attrs['unique_identifier'] = 'athenatest'
-        pars_g.attrs['cosmological_simulation'] = na.int64(0)
-        pars_g.attrs['num_ghost_zones'] = na.int64(0)
-        pars_g.attrs['field_ordering'] = na.int64(0)
-        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+        pars_g.attrs['cosmological_simulation'] = np.int64(0)
+        pars_g.attrs['num_ghost_zones'] = np.int64(0)
+        pars_g.attrs['field_ordering'] = np.int64(0)
+        pars_g.attrs['boundary_conditions'] = np.int64([0]*6) # For Now
 
         # Extra pars:
         pars_g.attrs['n_cells'] = grid['ncells']
@@ -481,7 +481,7 @@
         if name in self.field_conversions.keys():
             this_field.attrs['field_to_cgs'] = self.field_conversions[name]
         else:
-            this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            this_field.attrs['field_to_cgs'] = np.float64('1.0') # For Now
 
         # Add particle types
         # Nothing to do here

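On the VECTORS handling above: VTK interleaves vector fields as
x0, y0, z0, x1, y1, z1, ..., so strided slices recover the three
components and a Fortran-order reshape restores the grid layout. A
small sketch with illustrative dimensions:

    import numpy as np

    grid_dims = (4, 4, 4)  # illustrative
    data = np.arange(3 * 4 * 4 * 4, dtype='float64')
    data_x = data[0::3].reshape(grid_dims, order='F')
    data_y = data[1::3].reshape(grid_dims, order='F')
    data_z = data[2::3].reshape(grid_dims, order='F')
    # The first cell holds the first interleaved triple.
    assert data_x[0, 0, 0] == 0.0 and data_y[0, 0, 0] == 1.0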

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -83,11 +83,11 @@
     g.attrs["unique_identifier"] = pf.unique_identifier
     g.attrs["cosmological_simulation"] = pf.cosmological_simulation
     # @todo: Where is this in the yt API?
-    #g.attrs["num_ghost_zones"] = pf...
+    g.attrs["num_ghost_zones"] = 0
     # @todo: Where is this in the yt API?
-    #g.attrs["field_ordering"] = pf...
+    g.attrs["field_ordering"] = 0
     # @todo: not yet supported by yt.
-    #g.attrs["boundary_conditions"] = pf...
+    g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
 
     if pf.cosmological_simulation:
         g.attrs["current_redshift"] = pf.current_redshift
@@ -136,10 +136,12 @@
     # root datasets -- info about the grids
     ###
     f["grid_dimensions"] = pf.h.grid_dimensions
-    f["grid_left_index"] = pf.h.grid_left_edge
+    f["grid_left_index"] = np.array(
+            [g.get_global_startindex() for g in pf.h.grids]
+    ).reshape(pf.h.grid_dimensions.shape[0], 3)
     f["grid_level"] = pf.h.grid_levels
-    # @todo: Do we need to loop over the grids for this?
-    f["grid_parent_id"] = -1
+    # @todo: Fill with proper values
+    f["grid_parent_id"] = -np.ones(pf.h.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.h.grid_particle_count
 
     ###

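The new grid_left_index dataset stores each grid's integer offset from
the domain left edge in units of its own cell width, conceptually the
same glis computation used in the Athena converter above. A sketch
with illustrative values:

    import numpy as np

    domain_left_edge = np.array([0.0, 0.0, 0.0])
    grid_left_edge = np.array([0.25, 0.5, 0.0])   # illustrative
    dds = np.array([0.03125, 0.03125, 0.03125])   # cell width at this level
    start_index = np.rint(
        (grid_left_edge - domain_left_edge) / dds).astype('int64')
    # start_index -> [ 8 16  0]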

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -24,7 +24,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import yt.utilities.lib as lib
@@ -35,23 +35,23 @@
         self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
         return my_vals.reshape(orig_shape)
 
@@ -61,28 +61,28 @@
         self.truncate = truncate
         x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
         y_vals = data_object[self.y_name].ravel().astype('float64')
 
-        x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
-        y_i = (na.digitize(y_vals, self.y_bins) - 1).astype('int32')
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
+        x_i = (np.digitize(x_vals, self.x_bins) - 1).astype('int32')
+        y_i = (np.digitize(y_vals, self.y_bins) - 1).astype('int32')
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.BilinearlyInterpolate(self.table,
                                  x_vals, y_vals, self.x_bins, self.y_bins,
                                  x_i, y_i, my_vals)
@@ -94,9 +94,9 @@
         self.truncate = truncate
         x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = na.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = na.linspace(z0, z1, table.shape[2]).astype('float64')
+        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -104,23 +104,23 @@
         y_vals = data_object[self.y_name].ravel().astype('float64')
         z_vals = data_object[self.z_name].ravel().astype('float64')
 
-        x_i = na.digitize(x_vals, self.x_bins) - 1
-        y_i = na.digitize(y_vals, self.y_bins) - 1
-        z_i = na.digitize(z_vals, self.z_bins) - 1
-        if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
-            or na.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
-            or na.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
+        x_i = np.digitize(x_vals, self.x_bins) - 1
+        y_i = np.digitize(y_vals, self.y_bins) - 1
+        z_i = np.digitize(z_vals, self.z_bins) - 1
+        if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
+            or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
+            or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
             if not self.truncate:
                 mylog.error("Sorry, but your values are outside" + \
                             " the table!  Dunno what to do, so dying.")
                 mylog.error("Error was in: %s", data_object)
                 raise ValueError
             else:
-                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
-                y_i = na.minimum(na.maximum(y_i,0), len(self.y_bins)-2)
-                z_i = na.minimum(na.maximum(z_i,0), len(self.z_bins)-2)
+                x_i = np.minimum(np.maximum(x_i,0), len(self.x_bins)-2)
+                y_i = np.minimum(np.maximum(y_i,0), len(self.y_bins)-2)
+                z_i = np.minimum(np.maximum(z_i,0), len(self.z_bins)-2)
 
-        my_vals = na.zeros(x_vals.shape, dtype='float64')
+        my_vals = np.zeros(x_vals.shape, dtype='float64')
         lib.TrilinearlyInterpolate(self.table,
                                  x_vals, y_vals, z_vals,
                                  self.x_bins, self.y_bins, self.z_bins,
@@ -135,11 +135,11 @@
         xm = (self.x_bins[x_i+1] - x_vals) / (self.x_bins[x_i+1] - self.x_bins[x_i])
         ym = (self.y_bins[y_i+1] - y_vals) / (self.y_bins[y_i+1] - self.y_bins[y_i])
         zm = (self.z_bins[z_i+1] - z_vals) / (self.z_bins[z_i+1] - self.z_bins[z_i])
-        if na.any(na.isnan(self.table)):
+        if np.any(np.isnan(self.table)):
             raise ValueError
-        if na.any(na.isnan(x) | na.isnan(y) | na.isnan(z)):
+        if np.any(np.isnan(x) | np.isnan(y) | np.isnan(z)):
             raise ValueError
-        if na.any(na.isnan(xm) | na.isnan(ym) | na.isnan(zm)):
+        if np.any(np.isnan(xm) | np.isnan(ym) | np.isnan(zm)):
             raise ValueError
         my_vals  = self.table[x_i  ,y_i  ,z_i  ] * (xm*ym*zm)
         my_vals += self.table[x_i+1,y_i  ,z_i  ] * (x *ym*zm)

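The bin lookup shared by all three interpolators above works the same
way in each dimension: np.digitize returns the insertion index among
the bin edges, so subtracting one yields the index of the left edge of
the containing bin; -1 or len(bins)-1 flags out-of-table values, and
truncation clamps them to valid interior bins. A standalone sketch:

    import numpy as np

    x_bins = np.linspace(0.0, 1.0, 11).astype('float64')
    x_vals = np.array([-0.5, 0.05, 0.55, 1.5])
    x_i = (np.digitize(x_vals, x_bins) - 1).astype('int32')
    # x_i is now [-1, 0, 5, 10]; -1 and 10 flag out-of-table values.
    x_i = np.minimum(np.maximum(x_i, 0), len(x_bins) - 2)
    # After clamping: [0, 0, 5, 9]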

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import math
 
 def periodic_dist(a, b, period):
@@ -48,20 +48,20 @@
 
     Examples
     --------
-    >>> a = na.array([0.1, 0.1, 0.1])
-    >>> b = na.array([0.9, 0,9, 0.9])
+    >>> a = np.array([0.1, 0.1, 0.1])
+    >>> b = np.array([0.9, 0.9, 0.9])
     >>> period = 1.
     >>> dist = periodic_dist(a, b, 1.)
     >>> dist
     0.3464102
     """
-    a = na.array(a)
-    b = na.array(b)
+    a = np.array(a)
+    b = np.array(b)
     if a.size != b.size: raise RuntimeError("Arrays must be the same shape.")
-    c = na.empty((2, a.size), dtype="float64")
+    c = np.empty((2, a.size), dtype="float64")
     c[0,:] = abs(a - b)
     c[1,:] = period - abs(a - b)
-    d = na.amin(c, axis=0)**2
+    d = np.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
 def rotate_vector_3D(a, dim, angle):
@@ -87,8 +87,8 @@
     
     Examples
     --------
-    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
-    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> a = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, np.pi/2)
     >>> print b
     [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
     [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
@@ -100,27 +100,27 @@
     mod = False
     if len(a.shape) == 1:
         mod = True
-        a = na.array([a])
+        a = np.array([a])
     if a.shape[1] !=3:
         raise SyntaxError("The second dimension of the array a must be == 3!")
     if dim == 0:
-        R = na.array([[1, 0,0],
-            [0, na.cos(angle), na.sin(angle)],
-            [0, -na.sin(angle), na.cos(angle)]])
+        R = np.array([[1, 0,0],
+            [0, np.cos(angle), np.sin(angle)],
+            [0, -np.sin(angle), np.cos(angle)]])
     elif dim == 1:
-        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+        R = np.array([[np.cos(angle), 0, -np.sin(angle)],
             [0, 1, 0],
-            [na.sin(angle), 0, na.cos(angle)]])
+            [np.sin(angle), 0, np.cos(angle)]])
     elif dim == 2:
-        R = na.array([[na.cos(angle), na.sin(angle), 0],
-            [-na.sin(angle), na.cos(angle), 0],
+        R = np.array([[np.cos(angle), np.sin(angle), 0],
+            [-np.sin(angle), np.cos(angle), 0],
             [0, 0, 1]])
     else:
         raise SyntaxError("dim must be 0, 1, or 2!")
     if mod:
-        return na.dot(R, a.T).T[0]
+        return np.dot(R, a.T).T[0]
     else:
-        return na.dot(R, a.T).T
+        return np.dot(R, a.T).T
     
 
 def modify_reference_frame(CoM, L, P, V):
@@ -164,9 +164,9 @@
     
     Examples
     --------
-    >>> CoM = na.array([0.5, 0.5, 0.5])
-    >>> L = na.array([1, 0, 0])
-    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> CoM = np.array([0.5, 0.5, 0.5])
+    >>> L = np.array([1, 0, 0])
+    >>> P = np.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
     >>> V = P.copy()
     >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
     >>> LL
@@ -183,7 +183,7 @@
            [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
 
     """
-    if (L == na.array([0, 0, 1.])).all():
+    if (L == np.array([0, 0, 1.])).all():
         # Whew! Nothing to do!
         return L, P, V
     # First translate the positions to center of mass reference frame.
@@ -191,7 +191,7 @@
     # Now find the angle between modified L and the x-axis.
     LL = L.copy()
     LL[2] = 0.
-    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
     if L[1] < 0:
         theta = -theta
     # Now rotate all the position, velocity, and L vectors by this much around
@@ -200,7 +200,7 @@
     V = rotate_vector_3D(V, 2, theta)
     L = rotate_vector_3D(L, 2, theta)
     # Now find the angle between L and the z-axis.
-    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
     # This time we rotate around the y axis.
     P = rotate_vector_3D(P, 1, theta)
     V = rotate_vector_3D(V, 1, theta)
@@ -241,10 +241,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> circV = compute_rotational_velocity(CoM, L, P, V)
     >>> circV
     array([ 1.        ,  0.        ,  0.        ,  1.41421356])
@@ -254,13 +254,13 @@
     L, P, V = modify_reference_frame(CoM, L, P, V)
     # Find the vector in the plane of the galaxy for each position point
     # that is perpendicular to the radial vector.
-    radperp = na.cross([0, 0, 1], P)
+    radperp = np.cross([0, 0, 1], P)
     # Find the component of the velocity along the radperp vector.
     # Unf., I don't think there's a better way to do this.
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rp in enumerate(radperp):
-        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rp, V[i]) / np.dot(rp, rp) * rp
+        res[i] = np.dot(temp, temp)**0.5
     return res
     
 def compute_parallel_velocity(CoM, L, P, V):
@@ -296,10 +296,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> paraV = compute_parallel_velocity(CoM, L, P, V)
     >>> paraV
     array([10, -1,  1, -1])
@@ -342,10 +342,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> radV = compute_radial_velocity(CoM, L, P, V)
     >>> radV
     array([ 1.        ,  1.41421356,  0.        ,  0.        ])
@@ -357,10 +357,10 @@
     # with the cylindrical radial vector for this point.
     # Unf., I don't think there's a better way to do this.
     P[:,2] = 0
-    res = na.empty(V.shape[0], dtype='float64')
+    res = np.empty(V.shape[0], dtype='float64')
     for i, rad in enumerate(P):
-        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
-        res[i] = na.dot(temp, temp)**0.5
+        temp = np.dot(rad, V[i]) / np.dot(rad, rad) * rad
+        res[i] = np.dot(temp, temp)**0.5
     return res
 
 def compute_cylindrical_radius(CoM, L, P, V):
@@ -396,10 +396,10 @@
     
     Examples
     --------
-    >>> CoM = na.array([0, 0, 0])
-    >>> L = na.array([0, 0, 1])
-    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
-    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> CoM = np.array([0, 0, 0])
+    >>> L = np.array([0, 0, 1])
+    >>> P = np.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = np.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
     >>> cyl_r = compute_cylindrical_radius(CoM, L, P, V)
     >>> cyl_r
     array([ 1.        ,  1.41421356,  0.        ,  1.41421356])
@@ -409,7 +409,7 @@
     # Demote all the positions to the z=0 plane, which makes the distance
     # calculation very easy.
     P[:,2] = 0
-    return na.sqrt((P * P).sum(axis=1))
+    return np.sqrt((P * P).sum(axis=1))
     
 def ortho_find(vec1):
     r"""Find two complementary orthonormal vectors to a given vector.
@@ -489,9 +489,9 @@
     >>> c
     array([-0.16903085,  0.84515425, -0.50709255])
     """
-    vec1 = na.array(vec1, dtype=na.float64)
+    vec1 = np.array(vec1, dtype=np.float64)
     # Normalize
-    norm = na.sqrt(na.vdot(vec1, vec1))
+    norm = np.sqrt(np.vdot(vec1, vec1))
     if norm == 0:
         raise ValueError("Zero vector used as input.")
     vec1 /= norm
@@ -513,9 +513,9 @@
         z2 = 0.0
         x2 = -(y1 / x1)
         norm2 = (1.0 + z2 ** 2.0) ** (0.5)
-    vec2 = na.array([x2,y2,z2])
+    vec2 = np.array([x2,y2,z2])
     vec2 /= norm2
-    vec3 = na.cross(vec1, vec2)
+    vec3 = np.cross(vec1, vec2)
     return vec1, vec2, vec3
 
 def quartiles(a, axis=None, out=None, overwrite_input=False):
@@ -570,7 +570,7 @@
 
     Examples
     --------
-    >>> a = na.arange(100).reshape(10,10)
+    >>> a = np.arange(100).reshape(10,10)
     >>> a
     array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
@@ -601,7 +601,7 @@
             a.sort(axis=axis)
             sorted = a
     else:
-        sorted = na.sort(a, axis=axis)
+        sorted = np.sort(a, axis=axis)
     if axis is None:
         axis = 0
     indexer = [slice(None)] * sorted.ndim
@@ -619,8 +619,8 @@
             indexer[axis] = slice(index, index+1)
         # Use mean in odd and even case to coerce data type
         # and check, use out array.
-        result.append(na.mean(sorted[indexer], axis=axis, out=out))
-    return na.array(result)
+        result.append(np.mean(sorted[indexer], axis=axis, out=out))
+    return np.array(result)
 
 def get_rotation_matrix(theta, rot_vector):
     """
@@ -656,20 +656,20 @@
     array([[ 0.70710678,  0.        ,  0.70710678],
            [ 0.        ,  1.        ,  0.        ],
            [-0.70710678,  0.        ,  0.70710678]])
-    >>> na.dot(rot,a)
+    >>> np.dot(rot,a)
     array([ 0.,  1.,  0.])
     # since a is an eigenvector by construction
-    >>> na.dot(rot,[1,0,0])
+    >>> np.dot(rot,[1,0,0])
     array([ 0.70710678,  0.        , -0.70710678])
     """
 
     ux = rot_vector[0]
     uy = rot_vector[1]
     uz = rot_vector[2]
-    cost = na.cos(theta)
-    sint = na.sin(theta)
+    cost = np.cos(theta)
+    sint = np.sin(theta)
     
-    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+    R = np.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
                   [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     


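For reference, the matrix assembled in get_rotation_matrix above is the standard Rodrigues rotation about the unit vector rot_vector. A minimal numpy sanity check (using the import path shown in the orientation.py diff below):

    import numpy as np
    from yt.utilities.math_utils import get_rotation_matrix

    u = np.array([0., 0., 1.])                # unit rotation axis
    R = get_rotation_matrix(np.pi / 2, u)
    np.allclose(np.dot(R, u), u)              # the axis itself is fixed
    np.allclose(np.dot(R, R.T), np.identity(3))          # R is orthogonal
    np.allclose(np.dot(R, [1., 0., 0.]), [0., 1., 0.])   # quarter turn about z
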
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import abc
 import json
 import urllib2
@@ -97,10 +97,10 @@
         if hasattr(self, "_pf_mrep"):
             self._pf_mrep.upload()
         for i in metadata:
-            if isinstance(metadata[i], na.ndarray):
+            if isinstance(metadata[i], np.ndarray):
                 metadata[i] = metadata[i].tolist()
             elif hasattr(metadata[i], 'dtype'):
-                metadata[i] = na.asscalar(metadata[i])
+                metadata[i] = np.asscalar(metadata[i])
         metadata['obj_type'] = self.type
         if len(chunks) == 0:
             chunk_info = {'chunks': []}
@@ -129,7 +129,7 @@
         for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
-            na.save(f, cv)
+            np.save(f, cv)
             f.seek(0)
             pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
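
The chunked upload above serializes each array with np.save into a temporary file and rewinds it before streaming. A minimal sketch of that round trip (pure numpy; the array here is hypothetical):

    import numpy as np
    from tempfile import TemporaryFile

    cv = np.arange(10, dtype='float64')
    f = TemporaryFile()
    np.save(f, cv)     # writes the .npy header plus the data
    f.seek(0)          # rewind so the reader starts at the header
    assert (np.load(f) == cv).all()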


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
@@ -52,31 +52,31 @@
            
         """
         self.steady_north = steady_north
-        if na.all(north_vector == normal_vector):
+        if np.all(north_vector == normal_vector):
             mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
             north_vector = None
         if north_vector is not None: self.steady_north = True
+        self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
         if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
+            vecs = np.identity(3)
+            t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            north_vector = na.cross(normal_vector, east_vector).ravel()
+            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-            east_vector = na.cross(north_vector, normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+                north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
+            east_vector = np.cross(north_vector, normal_vector).ravel()
+        north_vector /= np.sqrt(np.dot(north_vector, north_vector))
+        east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
-        self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        self.inv_mat = np.linalg.pinv(self.unit_vectors)
         
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.


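For reference, a minimal numpy sketch of the branch in _setup_normalized_vectors that runs when no north vector is supplied: a coordinate axis is chosen by the cross-product heuristic, then east and north are built by cross products and normalized (the normal here is hypothetical):

    import numpy as np

    normal = np.array([0.3, 0.4, 0.5])
    normal /= np.sqrt(np.dot(normal, normal))
    vecs = np.identity(3)
    t = np.cross(normal, vecs).sum(axis=1)
    ax = t.argmax()
    east = np.cross(vecs[ax, :], normal).ravel()
    north = np.cross(normal, east).ravel()
    north /= np.sqrt(np.dot(north, north))
    east /= np.sqrt(np.dot(east, east))
    # east, north, normal now form a right-handed orthonormal triad
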
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -73,7 +73,7 @@
             for g in self.grids:
                 for f in fields:
                     if f not in self.queue[g.id]:
-                        d = na.zeros(g.ActiveDimensions, dtype='float64')
+                        d = np.zeros(g.ActiveDimensions, dtype='float64')
                         self.queue[g.id][f] = d
                 for f in pfields:
                     self.queue[g.id][f] = self._read(g, f)
@@ -87,12 +87,12 @@
         fi = self.pf.field_info[f]
         if fi.particle_type and g.NumberOfParticles == 0:
             # because this gets upcast to float
-            return na.array([],dtype='float64')
+            return np.array([],dtype='float64')
         try:
             temp = self.pf.h.io._read_data_set(g, f)
         except:# self.pf.hierarchy.io._read_exception as exc:
             if fi.not_in_all:
-                temp = na.zeros(g.ActiveDimensions, dtype='float64')
+                temp = np.zeros(g.ActiveDimensions, dtype='float64')
             else:
                 raise
         return temp
@@ -137,9 +137,9 @@
         msg = dict(grid_id = grid.id, field = field, op="read")
         mylog.debug("Requesting %s for %s from %s", field, grid, dest)
         if self.pf.field_info[field].particle_type:
-            data = na.empty(grid.NumberOfParticles, 'float64')
+            data = np.empty(grid.NumberOfParticles, 'float64')
         else:
-            data = na.empty(grid.ActiveDimensions, 'float64')
+            data = np.empty(grid.ActiveDimensions, 'float64')
         hook = self.comm.comm.Irecv([data, MPI.DOUBLE], source = dest)
         self.comm.comm.send(msg, dest = dest, tag = YT_TAG_MESSAGE)
         mylog.debug("Waiting for data.")
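
The request path above posts the nonblocking receive for the payload before sending the small request message, so the receive buffer is guaranteed to be in place when the reply arrives. A minimal mpi4py sketch of the same pattern (assumes mpi4py and two ranks; the tag value is hypothetical):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        data = np.empty(8, dtype='float64')
        hook = comm.Irecv([data, MPI.DOUBLE], source=1)  # buffer first
        comm.send(dict(op="read"), dest=1, tag=42)       # then the request
        hook.Wait()
    elif comm.rank == 1:
        msg = comm.recv(source=0, tag=42)
        comm.Send([np.arange(8, dtype='float64'), MPI.DOUBLE], dest=0)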


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -27,7 +27,7 @@
 import cStringIO
 import itertools
 import logging
-import numpy as na
+import numpy as np
 import sys
 
 from yt.funcs import *
@@ -131,13 +131,13 @@
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
-            self.my_obj_ids = na.arange(len(self._objs))
+            self.my_obj_ids = np.arange(len(self._objs))
         else:
             if not round_robin:
-                self.my_obj_ids = na.array_split(
-                                na.arange(len(self._objs)), self._skip)[self._offset]
+                self.my_obj_ids = np.array_split(
+                                np.arange(len(self._objs)), self._skip)[self._offset]
             else:
-                self.my_obj_ids = na.arange(len(self._objs))[self._offset::self._skip]
+                self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
         
     def __iter__(self):
         for gid in self.my_obj_ids:
@@ -421,14 +421,14 @@
             njobs, my_size)
         raise RuntimeError
     my_rank = my_communicator.rank
-    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    all_new_comms = np.array_split(np.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
     if parallel_capable:
         communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
-    obj_ids = na.arange(len(objects))
+    obj_ids = np.arange(len(objects))
 
     to_share = {}
     # If our objects object is slice-aware, like time series data objects are,
@@ -525,14 +525,14 @@
         #   cat
         #   join
         # data is selected to be of types:
-        #   na.ndarray
+        #   np.ndarray
         #   dict
         #   data field dict
         if datatype is not None:
             pass
         elif isinstance(data, types.DictType):
             datatype = "dict"
-        elif isinstance(data, na.ndarray):
+        elif isinstance(data, np.ndarray):
             datatype = "array"
         elif isinstance(data, types.ListType):
             datatype = "list"
@@ -549,14 +549,14 @@
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
@@ -581,16 +581,16 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = na.zeros(0, dtype=dtype) # This only works for
+                    data = np.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(self.comm.size, dtype='int64')
-            outsize = na.array(size, dtype='int64')
+            sizes = np.zeros(self.comm.size, dtype='int64')
+            outsize = np.array(size, dtype='int64')
             self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
-            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
@@ -608,7 +608,7 @@
     def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
-        if isinstance(data, na.ndarray) and \
+        if isinstance(data, np.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
             if self.comm.rank == root:
                 info = (data.shape, data.dtype)
@@ -616,7 +616,7 @@
                 info = ()
             info = self.comm.bcast(info, root=root)
             if self.comm.rank != root:
-                data = na.empty(info[0], dtype=info[1])
+                data = np.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
             self.comm.Bcast([data, mpi_type], root = root)
             return data
@@ -636,7 +636,7 @@
     @parallel_passthrough
     def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+        if isinstance(data, np.ndarray) and data.dtype != np.bool:
             if dtype is None:
                 dtype = data.dtype
             if dtype != data.dtype:
@@ -743,7 +743,7 @@
         return (obj._owner == self.comm.rank)
 
     def send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
         self.comm.Send([buf[0], MPI.INT], dest=target)
@@ -751,11 +751,11 @@
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
+        sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
+        buf = [np.empty((sizebuf[0],), 'int32'),
+               np.empty((sizebuf[0], args[2]),'float64'),
+               np.empty((sizebuf[0],),'float64')]
         self.comm.Recv([buf[0], MPI.INT], source=target)
         self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
         self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
@@ -775,8 +775,8 @@
         sys.exit()
 
         args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
+        tgd = np.array([args[0], args[1]], dtype='int64')
+        sizebuf = np.zeros(1, 'int64')
 
         while mask < size:
             if (mask & rank) != 0:
@@ -802,9 +802,9 @@
             sizebuf[0] = buf[0].size
         self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
+            buf = [np.empty((sizebuf[0],), 'int32'),
+                   np.empty((sizebuf[0], args[2]),'float64'),
+                   np.empty((sizebuf[0],),'float64')]
         self.comm.Bcast([buf[0], MPI.INT], root=0)
         self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
@@ -816,7 +816,7 @@
 
 
     def send_array(self, arr, dest, tag = 0):
-        if not isinstance(arr, na.ndarray):
+        if not isinstance(arr, np.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
@@ -830,7 +830,7 @@
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
-        arr = na.empty(ne, dtype=dt)
+        arr = np.empty(ne, dtype=dt)
         tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
@@ -841,11 +841,11 @@
             for i in range(send.shape[0]):
                 recv.append(self.alltoallv_array(send[i,:].copy(), 
                                                  total_size, offsets, sizes))
-            recv = na.array(recv)
+            recv = np.array(recv)
             return recv
         offset = offsets[self.comm.rank]
         tmp_send = send.view(self.__tocast)
-        recv = na.empty(total_size, dtype=send.dtype)
+        recv = np.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
@@ -867,7 +867,7 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    ranks = na.arange(MPI.COMM_WORLD.size)
+    ranks = np.arange(MPI.COMM_WORLD.size)
     communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
@@ -926,13 +926,13 @@
         xax, yax = x_dict[axis], y_dict[axis]
         cc = MPI.Compute_dims(self.comm.size, 2)
         mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+        cx, cy = np.unravel_index(mi, cc)
+        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
 
         DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
+        LE = np.ones(3, dtype='float64') * DLE
+        RE = np.ones(3, dtype='float64') * DRE
         LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
         RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
         LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
@@ -943,7 +943,7 @@
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        LE, RE = np.array(ds.left_edge), np.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
         if (LE == self.pf.domain_left_edge).all() and \
@@ -973,13 +973,13 @@
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \
@@ -1000,13 +1000,13 @@
         
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+        cx, cy, cz = np.unravel_index(mi, cc)
+        x = np.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = np.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = np.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
 
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+        LE = np.array([x[0], y[0], z[0]], dtype='float64')
+        RE = np.array([x[1], y[1], z[1]], dtype='float64')
 
         if padding > 0:
             return True, \


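The two decompositions above differ in shape: np.array_split deals out contiguous blocks of objects, while the round-robin slice interleaves them. A small numpy illustration (sizes hypothetical):

    import numpy as np

    n_objs, n_chunks, offset = 10, 3, 1
    np.array_split(np.arange(n_objs), n_chunks)[offset]  # -> array([4, 5, 6])
    np.arange(n_objs)[offset::n_chunks]                  # -> array([1, 4, 7])
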
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import time, threading, random
 
 from yt.funcs import *
@@ -142,8 +142,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
@@ -170,8 +170,8 @@
                     njobs, (my_size - 1))
         raise RuntimeError
     my_rank = comm.rank
-    all_new_comms = na.array_split(na.arange(1, my_size), njobs)
-    all_new_comms.insert(0, na.array([0]))
+    all_new_comms = np.array_split(np.arange(1, my_size), njobs)
+    all_new_comms.insert(0, np.array([0]))
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
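
Unlike the even split in parallel_analysis_interface.py, the task queue holds rank 0 out as the server and divides only ranks 1 through N-1 among the jobs. A quick check of the resulting layout (sizes hypothetical):

    import numpy as np

    my_size, njobs = 8, 3
    comms = np.array_split(np.arange(1, my_size), njobs)
    comms.insert(0, np.array([0]))
    # -> [array([0]), array([1, 2, 3]), array([4, 5]), array([6, 7])]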


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -21,7 +21,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-import numpy as na
+import numpy as np
 
 import matplotlib
 import matplotlib.colors as cc
@@ -83,14 +83,14 @@
 matplotlib.rc('image', cmap="algae")
 
 # This next colormap was designed by Tune Kamae and converted here by Matt
-_vs = na.linspace(0,1,255)
-_kamae_red = na.minimum(255,
-                113.9*na.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
+_vs = np.linspace(0,1,255)
+_kamae_red = np.minimum(255,
+                113.9*np.sin(7.64*(_vs**1.705)+0.701)-916.1*(_vs+1.755)**1.862 \
               + 3587.9*_vs+2563.4)/255.0
-_kamae_grn = na.minimum(255,
-                70.0*na.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
-_kamae_blu = na.minimum(255,
-                194.5*_vs**2.88+99.72*na.exp(-77.24*(_vs-0.742)**2.0)
+_kamae_grn = np.minimum(255,
+                70.0*np.sin(8.7*(_vs**1.26)-2.418)+151.7*_vs**0.5+70.0)/255.0
+_kamae_blu = np.minimum(255,
+                194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
 cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
@@ -121,15 +121,15 @@
 _h_cubehelix = 1.0
 
 _cubehelix_data = {
-        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * na.sin(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
-        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * na.cos(2 * na.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'red': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.14861 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) + 1.78277 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'green': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (-0.29227 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x)) - 0.90649 * np.sin(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
+        'blue': lambda x: x**_gamma_cubehelix + (_h_cubehelix * x**_gamma_cubehelix * (1 - x**_gamma_cubehelix) / 2) * (1.97294 * np.cos(2 * np.pi * (_s_cubehelix / 3 + _r_cubehelix * x))),
 }
 
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = na.linspace(0,1,255)
+_vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps:
         cdict = { 'red': zip(_vs,v[0],v[0]),
@@ -143,5 +143,5 @@
     r = cmap._lut[:-3, 0]
     g = cmap._lut[:-3, 1]
     b = cmap._lut[:-3, 2]
-    a = na.ones(b.shape)
+    a = np.ones(b.shape)
     return [r, g, b, a]
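
The colormaps in this file are assembled from per-channel (x, y, y) triples. A minimal sketch of the same construction through matplotlib's public API (Python 2 zip(), as in the file; the ramp and name are hypothetical, and register_cmap is assumed available in matplotlib of this era):

    import numpy as np
    import matplotlib.colors as mcolors
    import matplotlib.cm as mcm

    _vs = np.linspace(0, 1, 255)
    ramp = _vs
    cdict = {'red':   zip(_vs, ramp, ramp),
             'green': zip(_vs, ramp[::-1], ramp[::-1]),
             'blue':  zip(_vs, ramp, ramp)}
    cmap = mcolors.LinearSegmentedColormap("my_ramp", cdict, 256)
    mcm.register_cmap(name="my_ramp", cmap=cmap)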


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import pyx
-import numpy as na
+import numpy as np
 from matplotlib import cm
 from _mpl_imports import FigureCanvasAgg
 
@@ -243,7 +243,7 @@
             if xdata == None:
                 self.canvas.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 self.canvas.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
         else:
             plot = pyx.graph.graphxy \
@@ -253,7 +253,7 @@
             if xdata == None:
                 plot.plot(blank_data)
             else:
-                data = pyx.graph.data.points(na.array([xdata, ydata]).T, x=1, y=2)
+                data = pyx.graph.data.points(np.array([xdata, ydata]).T, x=1, y=2)
                 plot.plot(data, [pyx.graph.style.line([pyx.style.linewidth.Thick])])
             self.canvas.insert(plot)
         self.axes_drawn = True
@@ -495,7 +495,7 @@
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
         # Convert the colormap into a string
-        x = na.linspace(1,0,256)
+        x = np.linspace(1,0,256)
         cm_string = cm.cmap_d[name](x, bytes=True)[:,0:3].tostring()
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)
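
The colorbar code above flattens a matplotlib colormap into raw RGB bytes for the PyX bitmap. The key step, sketched with the public get_cmap accessor in place of cm.cmap_d (colormap name hypothetical):

    import numpy as np
    from matplotlib import cm

    x = np.linspace(1, 0, 256)                # top-to-bottom colorbar
    rgba = cm.get_cmap("hot")(x, bytes=True)  # (256, 4) uint8 RGBA
    cm_string = rgba[:, 0:3].tostring()       # 256*3 raw RGB bytes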


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,7 +29,7 @@
     y_dict, \
     axis_names
 import _MPL
-import numpy as na
+import numpy as np
 import weakref
 
 class FixedResolutionBuffer(object):
@@ -352,7 +352,7 @@
         """
         import numdisplay
         numdisplay.open()
-        if take_log: data=na.log10(self[field])
+        if take_log: data=np.log10(self[field])
         else: data=self[field]
         numdisplay.display(data)    
 
@@ -374,7 +374,7 @@
     """
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        indices = na.argsort(self.data_source['dx'])[::-1]
+        indices = np.argsort(self.data_source['dx'])[::-1]
         buff = _MPL.CPixelize( self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],


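Reversing the argsort over 'dx' above orders cells from coarsest to finest, so finer data is deposited last and overwrites coarser values. For example:

    import numpy as np

    dx = np.array([0.5, 0.125, 0.25])
    np.argsort(dx)[::-1]      # -> array([0, 2, 1]): coarsest first
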
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -21,7 +21,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import types, os
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer, ObliqueFixedResolutionBuffer
@@ -163,7 +163,7 @@
         """
         self.xlim = (low[0], high[0])
         self.ylim = (low[1], high[1])
-        return na.log10(self.buffer)
+        return np.log10(self.buffer)
 
     def set_width(self, width):
         """
@@ -283,7 +283,7 @@
 
     def __call__(self, val):
         self.pylab.clf()
-        self.pylab.imshow(na.log10(val), interpolation='nearest')
+        self.pylab.imshow(np.log10(val), interpolation='nearest')
         self.pylab.savefig("wimage_%03i.png" % self.tile_id)
 
 class TransportAppender(object):
@@ -297,13 +297,13 @@
     def __call__(self, val):
         from yt.utilities.lib import write_png_to_string
         from yt.visualization.image_writer import map_to_colors
-        image = na.log10(val)
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        image = np.log10(val)
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
         image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
         to_plot = map_to_colors(image, "algae")
-        to_plot = na.clip(to_plot, 0, 255)
+        to_plot = np.clip(to_plot, 0, 255)
         s = write_png_to_string(to_plot)
         response_body = "data:image/png;base64," + base64.encodestring(s)
         tf.close()
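
Taking log10 of a buffer that contains zeros produces -inf entries, which the code above masks out before computing color bounds. A small sketch:

    import numpy as np

    val = np.array([[0.0, 1.0], [10.0, 100.0]])
    image = np.log10(val)                      # val[0, 0] becomes -inf
    mi = np.nanmin(image[~np.isinf(image)])    # 0.0
    ma = np.nanmax(image[~np.isinf(image)])    # 2.0
    image = (image - mi) / (ma - mi)           # finite entries land in [0, 1]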


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,7 +23,7 @@
 import types
 import imp
 import os
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 import _colormap_data as cmd
@@ -44,7 +44,7 @@
 
         >>> image = scale_image(image, min=0, max=1000)
     """
-    if isinstance(image, na.ndarray) and image.dtype == na.uint8:
+    if isinstance(image, np.ndarray) and image.dtype == np.uint8:
         return image
     if isinstance(image, (types.TupleType, types.ListType)):
         image, mi, ma = image
@@ -52,7 +52,7 @@
         mi = image.min()
     if ma is None:
         ma = image.max()
-    image = (na.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
+    image = (np.clip((image-mi)/(ma-mi) * 255, 0, 255)).astype('uint8')
     return image
 
 def multi_image_composite(fn, red_channel, blue_channel,
@@ -97,22 +97,22 @@
     Examples
     --------
 
-        >>> red_channel = na.log10(frb["Temperature"])
-        >>> blue_channel = na.log10(frb["Density"])
+        >>> red_channel = np.log10(frb["Temperature"])
+        >>> blue_channel = np.log10(frb["Density"])
         >>> multi_image_composite("multi_channel1.png", red_channel, blue_channel)
 
     """
     red_channel = scale_image(red_channel)
     blue_channel = scale_image(blue_channel)
     if green_channel is None:
-        green_channel = na.zeros(red_channel.shape, dtype='uint8')
+        green_channel = np.zeros(red_channel.shape, dtype='uint8')
     else:
         green_channel = scale_image(green_channel)
     if alpha_channel is None:
-        alpha_channel = na.zeros(red_channel.shape, dtype='uint8') + 255
+        alpha_channel = np.zeros(red_channel.shape, dtype='uint8') + 255
     else:
         alpha_channel = scale_image(alpha_channel) 
-    image = na.array([red_channel, green_channel, blue_channel, alpha_channel])
+    image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
@@ -141,16 +141,16 @@
         The upper limit to clip values to in the output, if converting to uint8.
         If `bitmap_array` is already uint8, this will be ignored.
     """
-    if bitmap_array.dtype != na.uint8:
+    if bitmap_array.dtype != np.uint8:
         if max_val is None: max_val = bitmap_array.max()
-        bitmap_array = na.clip(bitmap_array / max_val, 0.0, 1.0) * 255
+        bitmap_array = np.clip(bitmap_array / max_val, 0.0, 1.0) * 255
         bitmap_array = bitmap_array.astype("uint8")
     if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3,4):
         raise RuntimeError
     if bitmap_array.shape[-1] == 3:
         s1, s2 = bitmap_array.shape[:2]
-        alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
-        bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+        alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
+        bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
         for channel in range(bitmap_array.shape[2]):
             bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
@@ -229,14 +229,14 @@
     """
     image = func(image)
     if color_bounds is None:
-        mi = na.nanmin(image[~na.isinf(image)])
-        ma = na.nanmax(image[~na.isinf(image)])
+        mi = np.nanmin(image[~np.isinf(image)])
+        ma = np.nanmax(image[~np.isinf(image)])
         color_bounds = mi, ma
     else:
         color_bounds = [func(c) for c in color_bounds]
     image = (image - color_bounds[0])/(color_bounds[1] - color_bounds[0])
     to_plot = map_to_colors(image, cmap_name)
-    to_plot = na.clip(to_plot, 0, 255)
+    to_plot = np.clip(to_plot, 0, 255)
     return to_plot
 
 def annotate_image(image, text, xpos, ypos, font_name = "Vera",
@@ -279,7 +279,7 @@
     >>> annotate_image(bitmap, "Hello!", 0, 100)
     >>> write_bitmap(bitmap, "saved.png")
     """
-    if len(image.shape) != 3 or image.dtype != na.uint8:
+    if len(image.shape) != 3 or image.dtype != np.uint8:
         raise RuntimeError("This routine requires a UINT8 bitmapped image.")
     font_path = os.path.join(imp.find_module("matplotlib")[1],
                              "mpl-data/fonts/ttf/",
@@ -295,10 +295,10 @@
         print "Your color map was not found in the extracted colormap file."
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
-    x = na.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
     shape = buff.shape
-    mapped = na.dstack(
-            [(na.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    mapped = np.dstack(
+            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",


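map_to_colors above pushes each normalized pixel through a per-channel lookup table with np.interp and stacks the channels into an RGB array. Sketched with a hypothetical three-channel LUT:

    import numpy as np

    lut = [np.linspace(0, 1, 16), np.zeros(16), np.linspace(1, 0, 16)]
    x = np.mgrid[0.0:1.0:16j]          # LUT sample positions in [0, 1]
    buff = np.random.random((4, 4))    # normalized image
    mapped = np.dstack([(np.interp(buff, x, v) * 255) for v in lut]).astype("uint8")
    mapped.shape                       # (4, 4, 3)
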
diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -26,7 +26,7 @@
 from matplotlib import figure
 import shutil
 import tempfile
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -71,7 +71,7 @@
 
     def add_image(self, fn, descr):
         self.image_metadata.append(descr)
-        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+        self.images.append((os.path.basename(fn), np.fromfile(fn, dtype='c')))
 
 class PlotCollection(object):
     __id_counter = 0
@@ -122,7 +122,7 @@
         elif center == "center" or center == "c":
             self.c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         else:
-            self.c = na.array(center, dtype='float64')
+            self.c = np.array(center, dtype='float64')
         mylog.info("Created plot collection with default plot-center = %s",
                     list(self.c))
 
@@ -1884,7 +1884,7 @@
         norm = matplotlib.colors.Normalize()
     ax = pylab.figure().gca()
     ax.autoscale(False)
-    axi = ax.imshow(na.random.random((npix, npix)),
+    axi = ax.imshow(np.random.random((npix, npix)),
                     extent = extent, norm = norm,
                     origin = 'lower')
     cb = pylab.colorbar(axi, norm = norm)


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -27,7 +27,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -52,25 +52,25 @@
     def convert_to_plot(self, plot, coord, offset = True):
         # coord should be a 2 x ncoord array-like datatype.
         try:
-            ncoord = na.array(coord).shape[1]
+            ncoord = np.array(coord).shape[1]
         except IndexError:
             ncoord = 1
 
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        x0 = na.tile(plot.xlim[0],ncoord)
-        x1 = na.tile(plot.xlim[1],ncoord)
-        xx0 = na.tile(plot._axes.get_xlim()[0],ncoord)
-        xx1 = na.tile(plot._axes.get_xlim()[1],ncoord)
+        x0 = np.tile(plot.xlim[0],ncoord)
+        x1 = np.tile(plot.xlim[1],ncoord)
+        xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
+        xx1 = np.tile(plot._axes.get_xlim()[1],ncoord)
         
-        y0 = na.tile(plot.ylim[0],ncoord)
-        y1 = na.tile(plot.ylim[1],ncoord)
-        yy0 = na.tile(plot._axes.get_ylim()[0],ncoord)
-        yy1 = na.tile(plot._axes.get_ylim()[1],ncoord)
+        y0 = np.tile(plot.ylim[0],ncoord)
+        y1 = np.tile(plot.ylim[1],ncoord)
+        yy0 = np.tile(plot._axes.get_ylim()[0],ncoord)
+        yy1 = np.tile(plot._axes.get_ylim()[1],ncoord)
         
         # We need a special case for when we are only given one coordinate.
-        if na.array(coord).shape == (2,):
+        if np.array(coord).shape == (2,):
             return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
                     (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
         else:
@@ -195,10 +195,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = na.meshgrid(na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
+            nn = np.sqrt(pixX**2 + pixY**2)
             pixX /= nn
             pixY /= nn
         plot._axes.quiver(X,Y, pixX, pixY, scale=self.scale, scale_units=self.scale_units)
@@ -250,12 +250,12 @@
         #appropriate shift to the copied field.
 
         #set the cumulative arrays for the periodic shifting.
-        AllX = na.zeros(plot.data["px"].size, dtype='bool')
-        AllY = na.zeros(plot.data["py"].size, dtype='bool')
+        AllX = np.zeros(plot.data["px"].size, dtype='bool')
+        AllY = np.zeros(plot.data["py"].size, dtype='bool')
         XShifted = plot.data["px"].copy()
         YShifted = plot.data["py"].copy()
         dom_x, dom_y = plot._period
-        for shift in na.mgrid[-1:1:3j]:
+        for shift in np.mgrid[-1:1:3j]:
             xlim = ((plot.data["px"] + shift*dom_x >= x0)
                  &  (plot.data["px"] + shift*dom_x <= x1))
             ylim = ((plot.data["py"] + shift*dom_y >= y0)
@@ -269,24 +269,24 @@
         wI = (AllX & AllY)
 
         # We want xi, yi in plot coordinates
-        xi, yi = na.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
+        xi, yi = np.mgrid[xx0:xx1:numPoints_x/(self.factor*1j),\
                           yy0:yy1:numPoints_y/(self.factor*1j)]
 
         # This converts XShifted and YShifted into plot coordinates
         x = (XShifted[wI]-x0)*dx + xx0
         y = (YShifted[wI]-y0)*dy + yy0
         z = plot.data[self.field][wI]
-        if plot.pf.field_info[self.field].take_log: z=na.log10(z)
+        if plot.pf.field_info[self.field].take_log: z=np.log10(z)
 
         # Both the input and output from the triangulator are in plot
         # coordinates
         zi = self.triang(x,y).nn_interpolator(z)(xi,yi)
         
         if plot.pf.field_info[self.field].take_log and self.clim is not None: 
-            self.clim = (na.log10(self.clim[0]), na.log10(self.clim[1]))
+            self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = na.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
         
         plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -324,9 +324,9 @@
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
         if self.periodic:
-            pxs, pys = na.mgrid[-1:1:3j,-1:1:3j]
+            pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
-            pxs, pys = na.mgrid[0:0:1j,0:0:1j]
+            pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
@@ -339,7 +339,7 @@
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
             if visible.nonzero()[0].size == 0: continue
-            verts = na.array(
+            verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
@@ -352,8 +352,8 @@
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )
-                active_ids = na.unique(plot.data['GridIndices'])
-                for i in na.where(visible_ids)[0]:
+                active_ids = np.unique(plot.data['GridIndices'])
+                for i in np.where(visible_ids)[0]:
                     plot._axes.text(
                         left_edge_x[i] + (2 * (xx1 - xx0) / xpix),
                         left_edge_y[i] + (2 * (yy1 - yy0) / ypix),
@@ -418,18 +418,18 @@
                              plot.data[self.field_y],
                              int(nx), int(ny),
                            (x0, x1, y0, y1),)
-        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
                       self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
         lines[0,:,:,:] = r0
-        mag = na.sqrt(pixX**2 + pixY**2)
-        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        mag = np.sqrt(pixX**2 + pixY**2)
+        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
         dt = 1.0 / (self.nsample-1)
         for i in range(1,self.nsample):
             xt = lines[i-1,0,:,:]
             yt = lines[i-1,1,:,:]
-            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
-            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
+            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
         for i in range(self.data_size[0]):
@@ -517,18 +517,18 @@
         max_dx = plot.data['pdx'].max()
         w_min_x = 250.0 * min_dx
         w_max_x = 1.0 / self.factor
-        min_exp_x = na.ceil(na.log10(w_min_x*plot.data.pf[self.unit])
-                           /na.log10(self.factor))
-        max_exp_x = na.floor(na.log10(w_max_x*plot.data.pf[self.unit])
-                            /na.log10(self.factor))
+        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
+                           /np.log10(self.factor))
+        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
+                            /np.log10(self.factor))
         n_x = max_exp_x - min_exp_x + 1
-        widths = na.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
+        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
         widths /= plot.data.pf[self.unit]
         left_edge_px = (center[xi] - widths/2.0 - x0)*dx
         left_edge_py = (center[yi] - widths/2.0 - y0)*dy
         right_edge_px = (center[xi] + widths/2.0 - x0)*dx
         right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = na.array(
+        verts = np.array(
                 [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                  (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
         visible =  ( right_edge_px - left_edge_px > 25 ) & \
@@ -635,7 +635,7 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        indices = na.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1]
         pixX = _MPL.CPixelize( plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
@@ -650,8 +650,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
+        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -687,7 +687,7 @@
         DomainWidth = DomainRight - DomainLeft
         
         nx, ny = plot.image._A.shape
-        buff = na.zeros((nx,ny),dtype='float64')
+        buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
@@ -701,7 +701,7 @@
                                  clump['dx']*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
-            buff = na.maximum(temp, buff)
+            buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
                                      **self.plot_args)
         plot._axes.hold(False)
@@ -845,7 +845,7 @@
             if size < self.min_size or size > self.max_size: continue
             # This could use halo.maximum_radius() instead of width
             if self.width is not None and \
-                na.abs(halo.center_of_mass() - 
+                np.abs(halo.center_of_mass() - 
                        plot.data.center)[plot.data.axis] > \
                    self.width:
                 continue
@@ -1093,8 +1093,8 @@
         LE[zax] = data.center[zax] - self.width*0.5
         RE[zax] = data.center[zax] + self.width*0.5
         if self.region is not None \
-            and na.all(self.region.left_edge <= LE) \
-            and na.all(self.region.right_edge >= RE):
+            and np.all(self.region.left_edge <= LE) \
+            and np.all(self.region.right_edge >= RE):
             return self.region
         self.region = data.pf.h.periodic_region(
             data.center, LE, RE)
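
The convert_to_plot helper earlier in this file is a linear remap from the data-space window (x0, x1) onto the axes window (xx0, xx1). A worked example (window values hypothetical):

    import numpy as np

    x0, x1 = 0.25, 0.75            # plot.xlim, in data units
    xx0, xx1 = 0.0, 512.0          # matplotlib axes limits
    coord = np.array([0.25, 0.50, 0.75])
    (coord - x0) / (x1 - x0) * (xx1 - xx0) + xx0   # -> array([   0.,  256.,  512.])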


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from _mpl_imports import *
@@ -183,21 +183,21 @@
         if (zmin in (None,'min')) or (zmax in (None,'max')):    
             imbuff = self._axes.images[-1]._A
             if zmin == 'min':
-                zmin = na.nanmin(imbuff[na.nonzero(imbuff)])
+                zmin = np.nanmin(imbuff[np.nonzero(imbuff)])
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(imbuff))
+                    zmax = min(zmin*10**(dex),np.nanmax(imbuff))
             if zmax == 'max':
-                zmax = na.nanmax(imbuff)
+                zmax = np.nanmax(imbuff)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(imbuff))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(imbuff))
         if self.colorbar is not None:
             if ticks is not None:
-                ticks = na.sort(ticks)
+                ticks = np.sort(ticks)
                 self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                 self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
             elif minmaxtick:
                 if self.log_field: 
-                    ticks = na.array(self.colorbar._ticker()[1],dtype='float')
+                    ticks = np.array(self.colorbar._ticker()[1],dtype='float')
                     ticks = [zmin] + ticks.tolist() + [zmax]
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(ticks)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (x) for x in ticks])
@@ -205,11 +205,11 @@
                     mylog.error('Sorry, we do not support minmaxtick for linear fields.  It likely comes close by default')
             elif nticks is not None:
                 if self.log_field:
-                    lin = na.linspace(na.log10(zmin),na.log10(zmax),nticks)
+                    lin = np.linspace(np.log10(zmin),np.log10(zmax),nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % (10**x) for x in lin])
                 else: 
-                    lin = na.linspace(zmin,zmax,nticks)
+                    lin = np.linspace(zmin,zmax,nticks)
                     self.colorbar.locator = matplotlib.ticker.FixedLocator(lin)
                     self.colorbar.formatter = matplotlib.ticker.FixedFormatter(["%0.2e" % x for x in lin])
 
@@ -218,7 +218,7 @@
                     self.colorbar.locator = self._old_locator
                 if hasattr(self,'_old_formatter'):
                     self.colorbar.formatter = self._old_formatter
-        self.norm.autoscale(na.array([zmin,zmax], dtype='float64'))
+        self.norm.autoscale(np.array([zmin,zmax], dtype='float64'))
         self.image.changed()
         if self.colorbar is not None:
             mpl_notify(self.image, self.colorbar)
@@ -343,7 +343,7 @@
             self.colorbar.formatter = ttype()
 
     def __init_temp_image(self, setup_colorbar):
-        temparray = na.ones(self.size)
+        temparray = np.ones(self.size)
         self.image = \
             self._axes.imshow(temparray, interpolation='nearest',
                              norm = self.norm, aspect=1.0, picker=True,
@@ -394,20 +394,20 @@
         if self[self.axis_names["Z"]].size == 0:
             raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
-                    na.nanmin(buff), na.nanmax(buff),
+                    np.nanmin(buff), np.nanmax(buff),
                     self[self.axis_names["Z"]].min(),
                     self[self.axis_names["Z"]].max())
         if self.log_field:
-            bI = na.where(buff > 0)
+            bI = np.where(buff > 0)
             if len(bI[0]) == 0:
                 newmin = 1e-99
                 newmax = 1e-99
             else:
-                newmin = na.nanmin(buff[bI])
-                newmax = na.nanmax(buff[bI])
+                newmin = np.nanmin(buff[bI])
+                newmax = np.nanmax(buff[bI])
         else:
-            newmin = na.nanmin(buff)
-            newmax = na.nanmax(buff)
+            newmin = np.nanmin(buff)
+            newmax = np.nanmax(buff)
         aspect = (self.ylim[1]-self.ylim[0])/(self.xlim[1]-self.xlim[0])
         if self.image._A.size != buff.size:
             self._axes.clear()
@@ -418,7 +418,7 @@
             self.image.set_data(buff)
         if self._axes.get_aspect() != aspect: self._axes.set_aspect(aspect)
         if self.do_autoscale:
-            self.norm.autoscale(na.array((newmin,newmax), dtype='float64'))
+            self.norm.autoscale(np.array((newmin,newmax), dtype='float64'))
         self._reset_image_parameters()
         self._run_callbacks()
 
@@ -476,8 +476,8 @@
         self._redraw_image()
 
     def autoscale(self):
-        zmin = na.nanmin(self._axes.images[-1]._A)
-        zmax = na.nanmax(self._axes.images[-1]._A)
+        zmin = np.nanmin(self._axes.images[-1]._A)
+        zmax = np.nanmax(self._axes.images[-1]._A)
         self.set_zlim(zmin, zmax)
 
     def switch_y(self, *args, **kwargs):
@@ -558,16 +558,16 @@
         numPoints_y = int(width)
         dx = numPoints_x / (x1-x0)
         dy = numPoints_y / (y1-y0)
-        xlim = na.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
+        xlim = np.logical_and(self.data["px"]+2.0*self.data['pdx'] >= x0,
                               self.data["px"]-2.0*self.data['pdx'] <= x1)
-        ylim = na.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
+        ylim = np.logical_and(self.data["py"]+2.0*self.data['pdy'] >= y0,
                               self.data["py"]-2.0*self.data['pdy'] <= y1)
-        wI = na.where(na.logical_and(xlim,ylim))
-        xi, yi = na.mgrid[0:numPoints_x, 0:numPoints_y]
+        wI = np.where(np.logical_and(xlim,ylim))
+        xi, yi = np.mgrid[0:numPoints_x, 0:numPoints_y]
         x = (self.data["px"][wI]-x0)*dx
         y = (self.data["py"][wI]-y0)*dy
         z = self.data[self.axis_names["Z"]][wI]
-        if self.log_field: z=na.log10(z)
+        if self.log_field: z=np.log10(z)
         buff = de.Triangulation(x,y).nn_interpolator(z)(xi,yi)
         buff = buff.clip(z.min(), z.max())
         if self.log_field: buff = 10**buff
@@ -603,7 +603,7 @@
         else:
             height = width
         self.pix = (width,height)
-        indices = na.argsort(self.data['dx'])[::-1]
+        indices = np.argsort(self.data['dx'])[::-1]
         buff = _MPL.CPixelize( self.data['x'], self.data['y'], self.data['z'],
                                self.data['px'], self.data['py'],
                                self.data['pdx'], self.data['pdy'], self.data['pdz'],
@@ -756,7 +756,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)
@@ -823,7 +823,7 @@
             cb(self)
 
     def __init_colorbar(self):
-        temparray = na.ones((self.x_bins.size, self.y_bins.size))
+        temparray = np.ones((self.x_bins.size, self.y_bins.size))
         self.norm = matplotlib.colors.Normalize()
         self.image = self._axes.pcolormesh(self.x_bins, self.y_bins,
                                       temparray, shading='flat',
@@ -858,13 +858,13 @@
         #self._redraw_image()
         if (zmin is None) or (zmax is None):    
             if zmin == 'min':
-                zmin = na.nanmin(self._axes.images[-1]._A)
+                zmin = np.nanmin(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmax = min(zmin*10**(dex),na.nanmax(self._axes.images[-1]._A))
+                    zmax = min(zmin*10**(dex),np.nanmax(self._axes.images[-1]._A))
             if zmax == 'max':
-                zmax = na.nanmax(self._axes.images[-1]._A)
+                zmax = np.nanmax(self._axes.images[-1]._A)
                 if dex is not None:
-                    zmin = max(zmax/(10**(dex)),na.nanmin(self._axes.images[-1]._A))
+                    zmin = max(zmax/(10**(dex)),np.nanmin(self._axes.images[-1]._A))
         self._zlim = (zmin, zmax)
 
     def set_log_field(self, val):
@@ -883,8 +883,8 @@
     def _redraw_image(self):
         vals = self.data[self.fields[2]].transpose()
         used_bin = self.data["UsedBins"].transpose()
-        vmin = na.nanmin(vals[used_bin])
-        vmax = na.nanmax(vals[used_bin])
+        vmin = np.nanmin(vals[used_bin])
+        vmax = np.nanmax(vals[used_bin])
         if self._zlim is not None: vmin, vmax = self._zlim
         if self._log_z:
             # We want smallest non-zero vmin
@@ -892,10 +892,10 @@
                                                 clip=False)
             self.ticker = matplotlib.ticker.LogLocator()
             if self._zlim is None:
-                vI = na.where(vals > 0)
+                vI = np.where(vals > 0)
                 vmin = vals[vI].min()
                 vmax = vals[vI].max()
-            self.norm.autoscale(na.array((vmin,vmax), dtype='float64'))
+            self.norm.autoscale(np.array((vmin,vmax), dtype='float64'))
         else:
             self.norm=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax,
                                                   clip=False)
@@ -979,7 +979,7 @@
             func = self._axes.semilogy
         elif self._log_x and self._log_y:
             func = self._axes.loglog
-        indices = na.argsort(self.data[self.fields[0]])
+        indices = np.argsort(self.data[self.fields[0]])
         func(self.data[self.fields[0]][indices],
              self.data[self.fields[1]][indices],
              **self.plot_options)

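For reference, the tick-pinning pattern in the hunks above can be reproduced outside of yt; a minimal standalone sketch (not part of the changeset; the figure contents and tick count are illustrative):

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.ticker

    fig, ax = plt.subplots()
    image = ax.imshow(np.random.random((16, 16)), vmin=1e-3, vmax=1.0)
    colorbar = fig.colorbar(image)

    # Log fields space the ticks evenly in log10 and exponentiate,
    # exactly as in the nticks branch above.
    nticks = 5
    lin = np.linspace(np.log10(1e-3), np.log10(1.0), nticks)
    colorbar.locator = matplotlib.ticker.FixedLocator(10**lin)
    colorbar.formatter = matplotlib.ticker.FixedFormatter(
        ["%0.2e" % (10**x) for x in lin])
    colorbar.update_ticks()
    fig.savefig('fixed_ticks.png')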

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -33,7 +33,7 @@
 import __builtin__
 from functools import wraps
 
-import numpy as na
+import numpy as np
 from ._mpl_imports import *
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
@@ -122,7 +122,7 @@
             ticks = []
         return ticks
 
-log_transform = FieldTransform('log10', na.log10, LogLocator())
+log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
 def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
@@ -164,7 +164,7 @@
     if not iterable(width):
         width = (width, width)
     Wx, Wy = width
-    width = na.array((Wx/pf[unit], Wy/pf[unit]))
+    width = np.array((Wx/pf[unit], Wy/pf[unit]))
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -174,11 +174,11 @@
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
     # Transforming to the cutting plane coordinate system
-    center = na.array(center)
+    center = np.array(center)
     center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
     (normal,perp1,perp2) = ortho_find(normal)
-    mat = na.transpose(na.column_stack((perp1,perp2,normal)))
-    center = na.dot(mat,center)
+    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+    center = np.dot(mat,center)
     width = width/pf.domain_width.min()
 
     bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
@@ -809,7 +809,7 @@
                 raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
             self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name=None):
+    def save(self,name=None,mpl_kwargs={}):
         """saves the plot to disk.
 
         Parameters
@@ -817,6 +817,10 @@
         name : string
            the base of the filename.  If not set the filename of 
            the parameter file is used
+        mpl_kwargs : dict
+           A dict of keyword arguments to be passed to matplotlib's print_figure.
+           
+        >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
         if name == None:
@@ -841,7 +845,7 @@
                 n = "%s_%s_%s" % (name, type, k)
             if weight:
                 n += "_%s" % (weight)
-            names.append(v.save(n))
+            names.append(v.save(n,mpl_kwargs))
         return names
 
     def _send_zmq(self):
@@ -1119,7 +1123,7 @@
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
-            zoom_fac = na.log10(x_width*self.pf['unitary'])/na.log10(min_zoom)
+            zoom_fac = np.log10(x_width*self.pf['unitary'])/np.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
             ticks = self.get_ticks(field)
             payload = {'type':'png_string',
@@ -1163,12 +1167,12 @@
 
         raw_data = self._frb.data_source
         b = self._frb.bounds
-        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+        xi, yi = np.mgrid[b[0]:b[1]:(vi / 8) * 1j,
                           b[2]:b[3]:(vj / 8) * 1j]
         x = raw_data['px']
         y = raw_data['py']
         z = raw_data[field]
-        if logit: z = na.log10(z)
+        if logit: z = np.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
@@ -1187,8 +1191,8 @@
         fy = "%s-velocity" % (axis_names[y_dict[axis]])
         px = new_frb[fx][::-1,:]
         py = new_frb[fy][::-1,:]
-        x = na.mgrid[0:vi-1:ny*1j]
-        y = na.mgrid[0:vj-1:nx*1j]
+        x = np.mgrid[0:vi-1:ny*1j]
+        y = np.mgrid[0:vj-1:nx*1j]
         # Always normalize, then we scale
         nn = ((px**2.0 + py**2.0)**0.5).max()
         px /= nn
@@ -1212,7 +1216,7 @@
     def _get_cbar_image(self, height = 400, width = 40, field = None):
         if field is None: field = self._current_field
         cmap_name = self._colormaps[field]
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
@@ -1255,14 +1259,23 @@
     def __init__(self, field, size):
         self._plot_valid = True
         fsize, axrect, caxrect = self._get_best_layout(size)
-        # Hardcoding the axis dimensions for now
         
-        self.figure = matplotlib.figure.Figure(figsize = fsize, 
-                                               frameon = True)
-        self.axes = self.figure.add_axes(axrect)
-        self.cax = self.figure.add_axes(caxrect)
-
-    def save(self, name, canvas = None):
+        if np.any(np.array(axrect) < 0):
+            self.figure = matplotlib.figure.Figure(figsize = size, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes((.07,.10,.8,.8))
+            self.cax = self.figure.add_axes((.87,.10,.04,.8))
+            mylog.warning('The axis ratio of the requested plot is very narrow.  '
+                          'There is a good chance the plot will not look very good; '
+                          'consider making the plot manually using FixedResolutionBuffer '
+                          'and matplotlib.')
+        else:
+            self.figure = matplotlib.figure.Figure(figsize = fsize, 
+                                                   frameon = True)
+            self.axes = self.figure.add_axes(axrect)
+            self.cax = self.figure.add_axes(caxrect)
+            
+    def save(self, name, mpl_kwargs, canvas = None):
         if name[-4:] == '.png':
             suffix = ''
         else:
@@ -1279,7 +1292,7 @@
             else:
                 mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
                 canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn)
+        canvas.print_figure(fn,**mpl_kwargs)
         return fn
 
     def _get_best_layout(self, size):

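The new mpl_kwargs dict is forwarded untouched to the canvas, so any savefig-style keyword survives the trip through PlotWindow.save. A hedged sketch of the equivalent bare-matplotlib call (the figure contents here are placeholders):

    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    fig = matplotlib.figure.Figure(figsize=(8, 8), frameon=True)
    ax = fig.add_axes((.07, .10, .8, .8))  # same rect as the fallback above
    ax.plot([0, 1], [0, 1])
    canvas = FigureCanvasAgg(fig)
    mpl_kwargs = {'bbox_inches': 'tight', 'dpi': 200}
    canvas.print_figure('slice.png', **mpl_kwargs)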

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -27,7 +27,7 @@
 import types
 
 from functools import wraps
-import numpy as na
+import numpy as np
 
 from .image_writer import \
     write_image, apply_colormap
@@ -129,19 +129,19 @@
         use_mesh = False
         xmi, xma = self.x_spec.bounds
         if self.x_spec.scale == 'log':
-            x_bins = na.logspace(na.log10(xmi), na.log10(xma),
+            x_bins = np.logspace(np.log10(xmi), np.log10(xma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            x_bins = na.logspace(xmi, xma, self.image.shape[0]+1)
+            x_bins = np.linspace(xmi, xma, self.image.shape[0]+1)
 
         ymi, yma = self.y_spec.bounds
         if self.y_spec.scale == 'log':
-            y_bins = na.logspace(na.log10(ymi), na.log10(yma),
+            y_bins = np.logspace(np.log10(ymi), np.log10(yma),
                                  self.image.shape[0]+1)
             use_mesh = True
         else:
-            y_bins = na.logspace(ymi, yma, self.image.shape[0]+1)
+            y_bins = np.linspace(ymi, yma, self.image.shape[0]+1)
 
         im = self.image
         if self.cbar.scale == 'log':
@@ -338,11 +338,11 @@
         raw_data = self.plot.image[::-1,:]
 
         if self.plot.cbar.scale == 'log':
-            func = na.log10
+            func = np.log10
         else:
             func = lambda a: a
-        raw_data = na.repeat(raw_data, 3, axis=0)
-        raw_data = na.repeat(raw_data, 3, axis=1)
+        raw_data = np.repeat(raw_data, 3, axis=0)
+        raw_data = np.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':
@@ -369,7 +369,7 @@
 
     def _convert_axis(self, spec):
         func = lambda a: a
-        if spec.scale == 'log': func = na.log10
+        if spec.scale == 'log': func = np.log10
         tick_info = self._convert_ticks(spec.ticks, spec.bounds, func)
         ax = {'ticks':tick_info,
               'title': spec.title}
@@ -378,7 +378,7 @@
     def _get_cbar_image(self, height = 400, width = 40):
         # Right now there's just the single 'cmap', but that will eventually
         # change.  I think?
-        vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
+        vals = np.mgrid[1:0:height * 1j] * np.ones(width)[:,None]
         vals = vals.transpose()
         to_plot = apply_colormap(vals)
         pngs = write_png_to_string(to_plot)

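The bin-edge branches above reduce to a small helper; a sketch under the same conventions (bin_edges is hypothetical, not a yt function; log axes get logarithmically spaced edges, linear axes evenly spaced ones, and n bins need n+1 edges):

    import numpy as np

    def bin_edges(lo, hi, nbins, scale):
        if scale == 'log':
            return np.logspace(np.log10(lo), np.log10(hi), nbins + 1)
        return np.linspace(lo, hi, nbins + 1)

    print bin_edges(1.0, 1.0e4, 4, 'log')    # [1e0 1e1 1e2 1e3 1e4]
    print bin_edges(0.0, 1.0, 4, 'linear')   # [0.  0.25 0.5  0.75 1.]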

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_passthrough
@@ -61,7 +61,7 @@
         Default: minimum dx
     length : float, optional
         Optionally specify the length of integration.  
-        Default: na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        Default: np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
     direction : real, optional
         Specifies the direction of integration.  The magnitude of this
         value has no effect, only the sign.
@@ -77,10 +77,10 @@
     >>> from yt.visualization.api import Streamlines
     >>> pf = load('DD1701') # Load pf
 
-    >>> c = na.array([0.5]*3)
+    >>> c = np.array([0.5]*3)
     >>> N = 100
     >>> scale = 1.0
-    >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+    >>> pos_dx = np.random.random((N,3))*scale-scale/2.
     >>> pos = c+pos_dx
     
     >>> streamlines = Streamlines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
@@ -91,7 +91,7 @@
     >>> fig=pl.figure() 
     >>> ax = Axes3D(fig)
     >>> for stream in streamlines.streamlines:
-    >>>     stream = stream[na.all(stream != 0.0, axis=1)]
+    >>>     stream = stream[np.all(stream != 0.0, axis=1)]
     >>>     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
     >>> pl.savefig('streamlines.png')
     """
@@ -101,13 +101,13 @@
                  get_magnitude=False):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
-        self.start_positions = na.array(positions)
+        self.start_positions = np.array(positions)
         self.N = self.start_positions.shape[0]
         self.xfield = xfield
         self.yfield = yfield
         self.zfield = zfield
         self.get_magnitude=get_magnitude
-        self.direction = na.sign(direction)
+        self.direction = np.sign(direction)
         if volume is None:
             volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
                             log_fields=[False,False,False], merge_trees=True)
@@ -116,13 +116,13 @@
             dx = self.pf.h.get_smallest_dx()
         self.dx = dx
         if length is None:
-            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+            length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
         self.steps = int(length/dx)
-        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+        self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
-            self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
+            self.magnitudes = np.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
         nprocs = self.comm.size
@@ -161,21 +161,21 @@
                 brick.integrate_streamline(stream[-step+1], self.direction*self.dx, marr)
                 mag[-step+1] = marr[0]
                 
-            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
-                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+            if np.any(stream[-step+1,:] <= self.pf.domain_left_edge) | \
+                   np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if na.any(stream[-step+1,:] < node.l_corner) | \
-                   na.any(stream[-step+1,:] >= node.r_corner):
+            if np.any(stream[-step+1,:] < node.l_corner) | \
+                   np.any(stream[-step+1,:] >= node.r_corner):
                 return step-1
             step -= 1
         return step
 
     def clean_streamlines(self):
-        temp = na.empty(self.N, dtype='object')
-        temp2 = na.empty(self.N, dtype='object')
+        temp = np.empty(self.N, dtype='object')
+        temp2 = np.empty(self.N, dtype='object')
         for i,stream in enumerate(self.streamlines):
-            mask = na.all(stream != 0.0, axis=1)
+            mask = np.all(stream != 0.0, axis=1)
             temp[i] = stream[mask]
             temp2[i] = self.magnitudes[i,mask]
         self.streamlines = temp

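The mask in clean_streamlines keeps only points the integrator actually reached; in isolation:

    import numpy as np
    stream = np.array([[0.1, 0.2, 0.3],
                       [0.0, 0.0, 0.0],   # never written by the integrator
                       [0.4, 0.5, 0.6]])
    mask = np.all(stream != 0.0, axis=1)
    print stream[mask]  # the all-zero row is dropped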

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -5,7 +5,7 @@
 ##
 
 import math
-import numpy as na
+import numpy as np
 
 def is_decade(x,base=10):
     if x == 0.0:
@@ -40,7 +40,7 @@
         if subs is None:
             self._subs = None  # autosub
         else:
-            self._subs = na.asarray(subs)+0.0
+            self._subs = np.asarray(subs)+0.0
 
     def _set_numticks(self):
         self.numticks = 15  # todo; be smart here; this is just for dev
@@ -62,9 +62,9 @@
         numdec = math.floor(vmax)-math.ceil(vmin)
 
         if self._subs is None: # autosub
-            if numdec>10: subs = na.array([1.0])
-            elif numdec>6: subs = na.arange(2.0, b, 2.0)
-            else: subs = na.arange(2.0, b)
+            if numdec>10: subs = np.array([1.0])
+            elif numdec>6: subs = np.arange(2.0, b, 2.0)
+            else: subs = np.arange(2.0, b)
         else:
             subs = self._subs
 
@@ -72,7 +72,7 @@
         while numdec/stride+1 > self.numticks:
             stride += 1
 
-        decades = na.arange(math.floor(vmin),
+        decades = np.arange(math.floor(vmin),
                              math.ceil(vmax)+stride, stride)
         if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
             ticklocs = []
@@ -81,7 +81,7 @@
         else:
             ticklocs = b**decades
 
-        return na.array(ticklocs)
+        return np.array(ticklocs)
 
 
 class LinearLocator(object):
@@ -122,7 +122,7 @@
 
 
         if self.numticks==0: return []
-        ticklocs = na.linspace(vmin, vmax, self.numticks)
+        ticklocs = np.linspace(vmin, vmax, self.numticks)
 
         #return self.raise_if_exceeds(ticklocs)
         return ticklocs

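The decade/stride selection in LogLocator above can be followed as a standalone calculation (the data limits below are illustrative):

    import math
    import numpy as np

    vmin, vmax = math.log10(3.0), math.log10(2.0e7)  # log10 of data range
    numticks = 5
    numdec = math.floor(vmax) - math.ceil(vmin)
    # Widen the stride until at most numticks decades remain.
    stride = 1
    while numdec / stride + 1 > numticks:
        stride += 1
    decades = np.arange(math.floor(vmin), math.ceil(vmax) + stride, stride)
    print 10.0 ** decades  # [1e0 1e2 1e4 1e6 1e8]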

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -27,7 +27,7 @@
 
 from yt.mods import *
 import yt.extensions.HierarchySubset as hs
-import numpy as na
+import numpy as np
 import h5py, time
 
 import matplotlib;matplotlib.use("Agg");import pylab
@@ -62,7 +62,7 @@
 
     print "Constructing transfer function."
     if "Data" in fn:
-        mh = na.log10(1.67e-24)
+        mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
         tf.add_gaussian( 8.25+mh, 0.002, [0.2, 0.2, 0.4, 0.1])
         tf.add_gaussian( 9.75+mh, 0.002, [0.0, 0.0, 0.3, 0.1])
@@ -77,17 +77,17 @@
         tf.add_gaussian(-28.5, 0.05, [1.0, 1.0, 1.0, 1.0])
     else: raise RuntimeError
 
-    cpu['ngrids'] = na.array([cpu['dims'].shape[0]], dtype='int32')
+    cpu['ngrids'] = np.array([cpu['dims'].shape[0]], dtype='int32')
     cpu['tf_r'] = tf.red.y.astype("float32")
     cpu['tf_g'] = tf.green.y.astype("float32")
     cpu['tf_b'] = tf.blue.y.astype("float32")
     cpu['tf_a'] = tf.alpha.y.astype("float32")
 
-    cpu['tf_bounds'] = na.array(tf.x_bounds, dtype='float32')
+    cpu['tf_bounds'] = np.array(tf.x_bounds, dtype='float32')
 
-    cpu['v_dir'] = na.array([0.3, 0.5, 0.6], dtype='float32')
+    cpu['v_dir'] = np.array([0.3, 0.5, 0.6], dtype='float32')
 
-    c = na.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
+    c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
     print "Getting cutting plane."
     cp = pf.h.cutting(cpu['v_dir'], c)
@@ -98,16 +98,16 @@
     back_c = c - cp._norm_vec * W
     front_c = c + cp._norm_vec * W
 
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
+    px, py = np.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
     xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + cp.center[0]
     yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + cp.center[1]
     zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + cp.center[2]
-    cpu['v_pos'] = na.array([xv, yv, zv], dtype='float32').transpose()
+    cpu['v_pos'] = np.array([xv, yv, zv], dtype='float32').transpose()
 
-    cpu['image_r'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_g'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_b'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
-    cpu['image_a'] = na.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_r'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_g'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
+    cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
     print "Generating module"
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
@@ -161,7 +161,7 @@
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))
 
-    image = na.array(image).transpose()
+    image = np.array(image).transpose()
     image = (image - mi) / (ma - mi)
     pylab.clf()
     pylab.imshow(image, interpolation='nearest')

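The pixel grid above relies on numpy's complex-step mgrid syntax; a tiny illustration:

    import numpy as np
    Nvec, W = 4, 1.0
    # A complex step count (Nvec*1j) asks mgrid for Nvec evenly spaced
    # samples spanning [-W, W] inclusive, rather than a fixed stride.
    px, py = np.mgrid[-W:W:Nvec*1j, -W:W:Nvec*1j]
    print px[:, 0]  # [-1. -0.33333333  0.33333333  1.]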

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/UBVRI.py
--- a/yt/visualization/volume_rendering/UBVRI.py
+++ b/yt/visualization/volume_rendering/UBVRI.py
@@ -24,21 +24,21 @@
 """
 
 
-import numpy as na
+import numpy as np
 
 johnson_filters = dict(
     B = dict(
-      wavelen = na.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
+      wavelen = np.array([3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, 4600,
         4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000, 5050, 5100, 5150, 5200,
         5250, 5300, 5350, 5400, 5450, 5500, 5550], dtype='float64'),
-      trans = na.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
+      trans = np.array([0.0, 0.0, 0.02, 0.05, 0.11, 0.18, 0.35, 0.55, 0.92,
         0.95, 0.98, 0.99, 1.0, 0.99, 0.98, 0.96, 0.94, 0.91, 0.87, 0.83, 0.79,
         0.74, 0.69, 0.63, 0.58, 0.52, 0.46, 0.41, 0.36, 0.3, 0.25, 0.2, 0.15,
         0.12, 0.09, 0.06, 0.04, 0.02, 0.01, 0.0, ], dtype='float64'),
       ),
     I = dict(
-      wavelen = na.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
+      wavelen = np.array([ 6800, 6850, 6900, 6950, 7000, 7050, 7100,
         7150, 7200, 7250, 7300, 7350, 7400, 7450, 7500, 7550, 7600, 7650, 7700,
         7750, 7800, 7850, 7900, 7950, 8000, 8050, 8100, 8150, 8200, 8250, 8300,
         8350, 8400, 8450, 8500, 8550, 8600, 8650, 8700, 8750, 8800, 8850, 8900,
@@ -48,7 +48,7 @@
         10600, 10650, 10700, 10750, 10800, 10850, 10900, 10950, 11000, 11050,
         11100, 11150, 11200, 11250, 11300, 11350, 11400, 11450, 11500, 11550,
         11600, 11650, 11700, 11750, 11800, 11850, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.01, 0.04, 0.08, 0.13, 0.17,
         0.21, 0.26, 0.3, 0.36, 0.4, 0.44, 0.49, 0.56, 0.6, 0.65, 0.72, 0.76,
         0.84, 0.9, 0.93, 0.96, 0.97, 0.97, 0.98, 0.98, 0.99, 0.99, 0.99, 0.99,
         1.0, 1.0, 1.0, 1.0, 1.0, 0.99, 0.98, 0.98, 0.97, 0.96, 0.94, 0.93, 0.9,
@@ -59,7 +59,7 @@
         0.02, 0.02, 0.02, 0.02, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     R = dict(
-      wavelen = na.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
+      wavelen = np.array([ 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, 7400,
@@ -67,7 +67,7 @@
         8050, 8100, 8150, 8200, 8250, 8300, 8350, 8400, 8450, 8500, 8550, 8600,
         8650, 8700, 8750, 8800, 8850, 8900, 8950, 9000, 9050, 9100, 9150, 9200,
         9250, 9300, 9350, 9400, 9450, 9500, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
+      trans = np.array([ 0.0, 0.01, 0.02, 0.04, 0.06, 0.11, 0.18, 0.23, 0.28,
         0.34, 0.4, 0.46, 0.5, 0.55, 0.6, 0.64, 0.69, 0.71, 0.74, 0.77, 0.79,
         0.81, 0.84, 0.86, 0.88, 0.9, 0.91, 0.92, 0.94, 0.95, 0.96, 0.97, 0.98,
         0.99, 0.99, 1.0, 1.0, 0.99, 0.98, 0.96, 0.94, 0.92, 0.9, 0.88, 0.85,
@@ -77,20 +77,20 @@
         0.02, 0.01, 0.01, 0.01, 0.01, 0.0, ], dtype='float64'),
       ),
     U = dict(
-      wavelen = na.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
+      wavelen = np.array([ 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, 3400,
         3450, 3500, 3550, 3600, 3650, 3700, 3750, 3800, 3850, 3900, 3950, 4000,
         4050, 4100, 4150, ], dtype='float64'),
-      trans = na.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
+      trans = np.array([ 0.0, 0.04, 0.1, 0.25, 0.61, 0.75, 0.84, 0.88, 0.93,
         0.95, 0.97, 0.99, 1.0, 0.99, 0.97, 0.92, 0.73, 0.56, 0.36, 0.23, 0.05,
         0.03, 0.01, 0.0, ], dtype='float64'),),
     V = dict(
-      wavelen = na.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
+      wavelen = np.array([ 4600, 4650, 4700, 4750, 4800, 4850, 4900, 4950, 5000,
         5050, 5100, 5150, 5200, 5250, 5300, 5350, 5400, 5450, 5500, 5550, 5600,
         5650, 5700, 5750, 5800, 5850, 5900, 5950, 6000, 6050, 6100, 6150, 6200,
         6250, 6300, 6350, 6400, 6450, 6500, 6550, 6600, 6650, 6700, 6750, 6800,
         6850, 6900, 6950, 7000, 7050, 7100, 7150, 7200, 7250, 7300, 7350, ],
           dtype='float64'),
-      trans = na.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
+      trans = np.array([ 0.0, 0.0, 0.01, 0.01, 0.02, 0.05, 0.11, 0.2, 0.38,
         0.67, 0.78, 0.85, 0.91, 0.94, 0.96, 0.98, 0.98, 0.95, 0.87, 0.79, 0.72,
         0.71, 0.69, 0.65, 0.62, 0.58, 0.52, 0.46, 0.4, 0.34, 0.29, 0.24, 0.2,
         0.17, 0.14, 0.11, 0.08, 0.06, 0.05, 0.03, 0.02, 0.02, 0.01, 0.01, 0.01,
@@ -102,4 +102,4 @@
 for filter, vals in johnson_filters.items():
     wavelen = vals["wavelen"]
     trans = vals["trans"]
-    vals["Lchar"] = wavelen[na.argmax(trans)]
+    vals["Lchar"] = wavelen[np.argmax(trans)]

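The Lchar assignment at the bottom picks each filter's wavelength of peak transmission; for example:

    import numpy as np
    wavelen = np.array([4000., 4500., 5000., 5500.])  # toy filter curve
    trans = np.array([0.20, 0.90, 0.60, 0.10])
    print wavelen[np.argmax(trans)]  # 4500.0, the characteristic wavelength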

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,7 +24,7 @@
 """
 
 import __builtin__
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.utilities.math_utils import *
@@ -167,12 +167,12 @@
         >>> pf = EnzoStaticOutput('DD1701') # Load pf
         >>> c = [0.5]*3 # Center
         >>> L = [1.0,1.0,1.0] # Viewpoint
-        >>> W = na.sqrt(3) # Width
+        >>> W = np.sqrt(3) # Width
         >>> N = 1024 # Pixels (1024^2)
 
         # Get density min, max
         >>> mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi, ma = na.log10(mi), na.log10(ma)
+        >>> mi, ma = np.log10(mi), np.log10(ma)
 
         # Construct transfer function
         >>> tf = vr.ColorTransferFunction((mi-2, ma+2))
@@ -226,10 +226,10 @@
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
         self.center = center
-        self.box_vectors = na.array([unit_vectors[0]*width[0],
+        self.box_vectors = np.array([unit_vectors[0]*width[0],
                                      unit_vectors[1]*width[1],
                                      unit_vectors[2]*width[2]])
-        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.origin = center - 0.5*np.dot(width,unit_vectors)
         self.back_center =  center - 0.5*width[2]*unit_vectors[2]
         self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
@@ -289,23 +289,23 @@
                                          north_vector = north_vector)
         self._setup_box_properties(width, self.center, self.orienter.unit_vectors)
     def new_image(self):
-        image = na.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
+        image = np.zeros((self.resolution[0], self.resolution[1], 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.transfer_function, self.sub_samples)
+                np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
     def get_sampler(self, args):
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
-            temp_dir = na.empty(3,dtype='float64')
+            temp_dir = np.empty(3,dtype='float64')
             temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
                     self.light_dir[1] * self.orienter.unit_vectors[2] + \
                     self.light_dir[2] * self.orienter.unit_vectors[0]
@@ -326,13 +326,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos, self.front_center, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
@@ -510,30 +510,30 @@
         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
         ...     iw.write_bitmap(snapshot, "move_%04i.png" % i)
         """
-        self.center = na.array(self.center)
+        self.center = np.array(self.center)
         dW = None
         if exponential:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
-                    self.center += (na.array(final) - self.center) / (10. * n_steps)
-                final_zoom = final_width/na.array(self.width)
+                    self.center += (np.array(final) - self.center) / (10. * n_steps)
+                final_zoom = final_width/np.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = na.array([1.0,1.0,1.0])
-            position_diff = (na.array(final)/self.center)*1.0
+                dW = np.array([1.0,1.0,1.0])
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
-                    width = na.array([final_width, final_width, final_width]) 
+                    width = np.array([final_width, final_width, final_width]) 
                     # left/right, top/bottom, front/back
-                dW = (1.0*final_width-na.array(self.width))/n_steps
+                dW = (1.0*final_width-np.array(self.width))/n_steps
             else:
-                dW = na.array([0.0,0.0,0.0])
-            dx = (na.array(final)-self.center)*1.0/n_steps
+                dW = np.array([0.0,0.0,0.0])
+            dx = (np.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.switch_view(center=self.center*dx, width=self.width*dW)
@@ -559,7 +559,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.rotation_vector
@@ -568,7 +568,7 @@
 
         normal_vector = self.front_center-self.center
 
-        self.switch_view(normal_vector=na.dot(R,normal_vector))
+        self.switch_view(normal_vector=np.dot(R,normal_vector))
 
     def roll(self, theta):
         r"""Roll by a given angle
@@ -583,12 +583,12 @@
         Examples
         --------
 
-        >>> cam.roll(na.pi/4)
+        >>> cam.roll(np.pi/4)
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
         north_vector = self.orienter.north_vector
-        self.switch_view(north_vector=na.dot(R, north_vector))
+        self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):
         r"""Loop over rotate, creating a rotation
@@ -613,7 +613,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -676,12 +676,12 @@
         self.front_center += self.expand_factor*dl
         self.back_center -= dl
 
-        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+        px = np.linspace(-self.width[0]/2.0, self.width[0]/2.0,
                          self.resolution[0])[:,None]
-        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+        py = np.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.orienter.inv_mat
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = np.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
@@ -693,14 +693,14 @@
         positions = self.front_center - 1.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
         vectors = (self.front_center - positions)
 
-        uv = na.ones(3, dtype='float64')
+        uv = np.ones(3, dtype='float64')
         image.shape = (self.resolution[0]**2,1,3)
         vectors.shape = (self.resolution[0]**2,1,3)
         positions.shape = (self.resolution[0]**2,1,3)
         args = (positions, vectors, self.back_center, 
                 (0.0,1.0,0.0,1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'), 
+                np.zeros(3, dtype='float64'), 
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -708,7 +708,7 @@
         image.shape = self.resolution[0], self.resolution[0], 3
 
 def corners(left_edge, right_edge):
-    return na.array([
+    return np.array([
       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
@@ -726,7 +726,7 @@
                  pf = None, use_kd=True, no_ghost=False, use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
         self.use_kd = use_kd
@@ -747,20 +747,20 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
+        image = np.zeros((12 * self.nside ** 2, 1, 3), dtype='float64', order='C')
         return image
 
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs = arr_pix2vec_nest(self.nside, np.arange(nv))
         vs *= self.radius
         vs.shape = nv, 1, 3
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nv, 1, 3), dtype='float64') * self.center
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
  
@@ -771,13 +771,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -823,14 +823,14 @@
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*na.pi:800j, 0:na.pi:800j]
+            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
             image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
 
             fig = matplotlib.figure.Figure((10, 5))
             ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
+            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
@@ -852,7 +852,7 @@
                  rays_per_cell = 0.1, max_nside = 8192):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
         if transfer_function is None:
@@ -880,8 +880,8 @@
                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
         total_cells = 0
         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
+        left_edges = np.array([b.LeftEdge for b in bricks])
+        right_edges = np.array([b.RightEdge for b in bricks])
         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
                      for b in bricks))
         # We jitter a bit if we're on a boundary of our initial grid
@@ -896,7 +896,7 @@
         for i,brick in enumerate(bricks):
             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
                                        bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         info, values = ray_source.get_rays()
@@ -935,10 +935,10 @@
         self.use_light = use_light
         self.light_dir = None
         self.light_rgba = None
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
         if iterable(resolution):
@@ -957,7 +957,7 @@
         self.volume = volume
 
     def new_image(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        image = np.zeros((self.resolution**2,1,3), dtype='float64', order='C')
         return image
         
     def get_sampler_args(self, image):
@@ -968,13 +968,13 @@
             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
         del vp2
         vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
 
         args = (positions, vp, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                na.zeros(3, dtype='float64'),
+                np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
         return args
 
@@ -988,13 +988,13 @@
         if double_check:
             for brick in self.volume.bricks:
                 for data in brick.my_data:
-                    if na.any(na.isnan(data)):
+                    if np.any(np.isnan(data)):
                         raise RuntimeError
         
         view_pos = self.center
         for brick in self.volume.traverse(view_pos, None, image):
             sampler(brick, num_threads=num_threads)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         
         pbar.finish()
@@ -1088,7 +1088,7 @@
         
         >>> field='Density'
         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> mi,ma = np.log10(mi), np.log10(ma)
         
         # You may want to comment out the above lines and manually set the min and max
         # of the log of the Density field. For example:
@@ -1106,7 +1106,7 @@
         # the color range to the min and max values, rather than the transfer function
         # bounds.
         >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=np.logspace(-2,0,Nc),
         >>>         colormap='RdBu_r')
         >>> 
         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
@@ -1164,18 +1164,18 @@
             self.nimy = 1
         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
+        if rotation is None: rotation = np.eye(3)
         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
+        self.normal_vector = np.array([0.,0.,1])
+        self.north_vector = np.array([1.,0.,0.])
+        self.east_vector = np.array([0.,1.,0.])
         self.rotation_vector = self.north_vector
 
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
+        self.center = np.array(center, dtype='float64')
         self.focal_center = focal_center
         self.radius = radius
         self.fov = fov
@@ -1195,17 +1195,17 @@
 
     def get_vector_plane(self):
         if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec =  np.array(self.focal_center) - np.array(self.center)
             rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+            angle = np.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
                 (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector = np.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+            self.normal_vector = np.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = np.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = np.dot(self.rotation_matrix,self.east_vector)
         else:
             self.focal_center = self.center + self.radius*self.normal_vector  
         dist = ((self.focal_center - self.center)**2).sum()**0.5
@@ -1228,9 +1228,9 @@
             self.get_vector_plane()
 
         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        image = np.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((nx*ny, 1, 3), dtype='float64') * self.center
         vector_plane = VectorPlane(positions, self.vp, self.center,
                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
         tfp = TransferFunctionProxy(self.transfer_function)
@@ -1243,7 +1243,7 @@
         total_cells = 0
         for brick in self.volume.traverse(None, self.center, image):
             brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
+            total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
         pbar.finish()
         image.shape = (nx, ny, 3)
@@ -1269,7 +1269,7 @@
         if self.image_decomp:
             if self.comm.rank == 0:
                 if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
+                    final_image = np.empty((nx*self.nimx, 
                         ny*self.nimy, 3),
                         dtype='float64',order='C')
                     final_image[:nx, :ny, :] = image
@@ -1312,7 +1312,7 @@
         Examples
         --------
 
-        >>> cam.rotate(na.pi/4)
+        >>> cam.rotate(np.pi/4)
         """
         if rot_vector is None:
             rot_vector = self.north_vector
@@ -1322,9 +1322,9 @@
         R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+        self.normal_vector = np.dot(R,self.normal_vector)
+        self.north_vector = np.dot(R,self.north_vector)
+        self.east_vector = np.dot(R,self.east_vector)
 
         if keep_focus:
             self.center = self.focal_center - dist*self.normal_vector
@@ -1349,7 +1349,7 @@
         Examples
         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        >>> for i, snapshot in enumerate(cam.rotation(np.pi, 10)):
         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
         """
 
@@ -1381,10 +1381,10 @@
         ...     cam.save_image('move_%04i.png' % i)
         """
         if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
+            position_diff = (np.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
+            dx = (np.array(final) - self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
                 self.center *= dx
@@ -1426,7 +1426,7 @@
         effects of nearby cells.
     rotation : optional, 3x3 array
         If supplied, the vectors will be rotated by this.  You can construct
-        this by, for instance, calling na.array([v1,v2,v3]) where those are the
+        this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
 
     Returns
@@ -1445,7 +1445,7 @@
     # We manually modify the ProjectionTransferFunction to get it to work the
     # way we want, with a second field that's also passed through.
     fields = [field]
-    center = na.array(center, dtype='float64')
+    center = np.array(center, dtype='float64')
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
         def _make_wf(f, w):
@@ -1457,8 +1457,8 @@
             function=_make_wf(field, weight))
         fields = ["temp_weightfield", weight]
     nv = 12*nside**2
-    image = na.zeros((nv,1,3), dtype='float64', order='C')
-    vs = arr_pix2vec_nest(nside, na.arange(nv))
+    image = np.zeros((nv,1,3), dtype='float64', order='C')
+    vs = arr_pix2vec_nest(nside, np.arange(nv))
     vs.shape = (nv,1,3)
     if rotation is not None:
         vs2 = vs.copy()
@@ -1466,14 +1466,14 @@
             vs[:,:,i] = (vs2 * rotation[:,i]).sum(axis=2)
     else:
         vs += 1e-8
-    positions = na.ones((nv, 1, 3), dtype='float64', order='C') * center
+    positions = np.ones((nv, 1, 3), dtype='float64', order='C') * center
     dx = min(g.dds.min() for g in pf.h.find_point(center)[0])
     positions += inner_radius * dx * vs
     vs *= radius
-    uv = na.ones(3, dtype='float64')
+    uv = np.ones(3, dtype='float64')
     grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
-                                image, uv, uv, na.zeros(3, dtype='float64'))
+                                image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
         data = [grid[field] * grid.child_mask.astype('float64')
@@ -1502,15 +1502,15 @@
                         take_log = True, resolution=512, cmin=None, cmax=None):
     import matplotlib.figure
     import matplotlib.backends.backend_agg
-    if rotation is None: rotation = na.eye(3).astype("float64")
+    if rotation is None: rotation = np.eye(3).astype("float64")
 
     img, count = pixelize_healpix(nside, image, resolution, resolution, rotation)
 
     fig = matplotlib.figure.Figure((10, 5))
     ax = fig.add_subplot(1,1,1,projection='aitoff')
-    if take_log: func = na.log10
+    if take_log: func = np.log10
     else: func = lambda a: a
-    implot = ax.imshow(func(img), extent=(-na.pi,na.pi,-na.pi/2,na.pi/2),
+    implot = ax.imshow(func(img), extent=(-np.pi,np.pi,-np.pi/2,np.pi/2),
                        clip_on=False, aspect=0.5, vmin=cmin, vmax=cmax)
     cb = fig.colorbar(implot, orientation='horizontal')
     cb.set_label(label)
@@ -1568,12 +1568,12 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = na.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
             (-self.width[0]/2, self.width[0]/2,
              -self.width[1]/2, self.width[1]/2),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
-                na.array(self.width), self.sub_samples)
+                np.array(self.width), self.sub_samples)
         return args
 
     def finalize_image(self,image):
@@ -1607,8 +1607,8 @@
                     this_point = (self.center + width/2. * off1 * north_vector
                                          + width/2. * off2 * east_vector
                                          + width/2. * off3 * normal_vector)
-                    na.minimum(mi, this_point, mi)
-                    na.maximum(ma, this_point, ma)
+                    np.minimum(mi, this_point, mi)
+                    np.maximum(ma, this_point, ma)
         # Now we have a bounding box.
         grids = pf.h.region(self.center, mi, ma)._grids
 
@@ -1630,7 +1630,7 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.pf.field_info[self.field].take_log:
-            im = na.log10(image)
+            im = np.log10(image)
         else:
             im = image
         if self.comm.rank is 0 and fn is not None:
@@ -1722,7 +1722,7 @@
 
     >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
                       0.2, N, "Temperature", "Density")
-    >>> write_image(na.log10(image), "offaxis.png")
+    >>> write_image(np.log10(image), "offaxis.png")
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,

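The rotate/roll methods above all follow the same pattern: build a rotation matrix about some axis and apply it to the view vectors with np.dot. A self-contained sketch; get_rotation_matrix here is a local stand-in for the helper imported from yt.utilities.math_utils, written out as the standard axis-angle (Rodrigues) matrix:

    import numpy as np

    def get_rotation_matrix(theta, u):
        # Rotation by theta about the (normalized) axis u.
        u = np.asarray(u, dtype='float64')
        u = u / np.sqrt((u ** 2).sum())
        ux, uy, uz = u
        c, s = np.cos(theta), np.sin(theta)
        return np.array([
            [c + ux*ux*(1-c),    ux*uy*(1-c) - uz*s, ux*uz*(1-c) + uy*s],
            [uy*ux*(1-c) + uz*s, c + uy*uy*(1-c),    uy*uz*(1-c) - ux*s],
            [uz*ux*(1-c) - uy*s, uz*uy*(1-c) + ux*s, c + uz*uz*(1-c)]])

    normal_vector = np.array([0.0, 0.0, 1.0])
    R = get_rotation_matrix(np.pi / 4, [1.0, 0.0, 0.0])
    print np.dot(R, normal_vector)  # [ 0. -0.70710678  0.70710678]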

diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -24,7 +24,7 @@
 """
 
 import random
-import numpy as na
+import numpy as np
 from .create_spline import create_spline
 
 class Keyframes(object):
@@ -67,12 +67,12 @@
         Examples
         --------
 
-        >>> import numpy as na
+        >>> import numpy as np
         >>> import matplotlib.pyplot as plt
         >>> from yt.visualization.volume_rendering.camera_path import *
 
         # Make a camera path from 10 random (x,y,z) keyframes
-        >>> data = na.random.random.((10,3))
+        >>> data = np.random.random((10,3))
         >>> kf = Keyframes(data[:,0], data[:,1], data[:,2])
         >>> path = kf.create_path(250, shortest_path=False)
 
@@ -93,7 +93,7 @@
             print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
             sys.exit()
         self.nframes = Nx
-        self.pos = na.zeros((Nx,3))
+        self.pos = np.zeros((Nx,3))
         self.pos[:,0] = x
         self.pos[:,1] = y
         if z is not None:
@@ -103,7 +103,7 @@
         self.north_vectors = north_vectors
         self.up_vectors = up_vectors
         if times is None:
-            self.times = na.arange(self.nframes)
+            self.times = np.arange(self.nframes)
         else:
             self.times = times
         self.cartesian_matrix()
@@ -131,7 +131,7 @@
         """
         # randomize tour
         self.tour = range(self.nframes)
-        na.random.shuffle(self.tour)
+        np.random.shuffle(self.tour)
         if fixed_start:
             first = self.tour.index(0)
             self.tour[0], self.tour[first] = self.tour[first], self.tour[0]
@@ -191,17 +191,17 @@
         Create a distance matrix for the city coords that uses
         straight line distance
         """
-        self.dist_matrix = na.zeros((self.nframes, self.nframes))
-        xmat = na.zeros((self.nframes, self.nframes))
+        self.dist_matrix = np.zeros((self.nframes, self.nframes))
+        xmat = np.zeros((self.nframes, self.nframes))
         xmat[:,:] = self.pos[:,0]
         dx = xmat - xmat.T
-        ymat = na.zeros((self.nframes, self.nframes))
+        ymat = np.zeros((self.nframes, self.nframes))
         ymat[:,:] = self.pos[:,1]
         dy = ymat - ymat.T
-        zmat = na.zeros((self.nframes, self.nframes))
+        zmat = np.zeros((self.nframes, self.nframes))
         zmat[:,:] = self.pos[:,2]
         dz = zmat - zmat.T
-        self.dist_matrix = na.sqrt(dx*dx + dy*dy + dz*dz)
+        self.dist_matrix = np.sqrt(dx*dx + dy*dy + dz*dz)
 
     def tour_length(self, tour):
         r"""
@@ -227,7 +227,7 @@
         if next > prev:
             return 1.0
         else:
-            return na.exp( -abs(next-prev) / temperature )
+            return np.exp( -abs(next-prev) / temperature )
 
     def get_shortest_path(self):
         r"""Determine shortest path between all keyframes.
@@ -294,14 +294,14 @@
             path.  Also saved to self.path.
         """
         self.npoints = npoints
-        self.path = {"time": na.zeros(npoints),
-                     "position": na.zeros((npoints, 3)),
-                     "north_vectors": na.zeros((npoints,3)),
-                     "up_vectors": na.zeros((npoints,3))}
+        self.path = {"time": np.zeros(npoints),
+                     "position": np.zeros((npoints, 3)),
+                     "north_vectors": np.zeros((npoints,3)),
+                     "up_vectors": np.zeros((npoints,3))}
         if shortest_path:
             self.get_shortest_path()
         if path_time is None:
-            path_time = na.linspace(0, self.nframes, npoints)
+            path_time = np.linspace(0, self.nframes, npoints)
         self.path["time"] = path_time
         for dim in range(3):
             self.path["position"][:,dim] = create_spline(self.times, self.pos[:,dim],


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def create_spline(old_x, old_y, new_x, tension=0.5, sorted=False):
     """
@@ -45,18 +45,18 @@
     """
     ndata = len(old_x)
     N = len(new_x)
-    result = na.zeros(N)
+    result = np.zeros(N)
     if not sorted:
-        isort = na.argsort(old_x)
+        isort = np.argsort(old_x)
         old_x = old_x[isort]
         old_y = old_y[isort]
     # Floor/ceiling of values outside of the original data
-    new_x = na.minimum(new_x, old_x[-1])
-    new_x = na.maximum(new_x, old_x[0])
-    ind = na.searchsorted(old_x, new_x)
-    im2 = na.maximum(ind-2, 0)
-    im1 = na.maximum(ind-1, 0)
-    ip1 = na.minimum(ind+1, ndata-1)
+    new_x = np.minimum(new_x, old_x[-1])
+    new_x = np.maximum(new_x, old_x[0])
+    ind = np.searchsorted(old_x, new_x)
+    im2 = np.maximum(ind-2, 0)
+    im1 = np.maximum(ind-1, 0)
+    ip1 = np.minimum(ind+1, ndata-1)
     for i in range(N):
         if ind[i] != im1[i]:
             u = (new_x[i] - old_x[im1[i]]) / (old_x[ind[i]] - old_x[im1[i]])
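
A short usage sketch for this module (the signature comes from the function
above; the sine-wave data is purely illustrative):

    import numpy as np
    from yt.visualization.volume_rendering.create_spline import create_spline

    # resample 8 coarse keyframe samples onto a dense axis
    old_x = np.linspace(0.0, 1.0, 8)
    old_y = np.sin(2.0 * np.pi * old_x)
    new_x = np.linspace(0.0, 1.0, 100)
    new_y = create_spline(old_x, old_y, new_x, tension=0.5)

Per the floor/ceiling lines above, values of new_x falling outside
[old_x[0], old_x[-1]] are clamped to the endpoints rather than extrapolated.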


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 import h5py
 
@@ -63,10 +63,10 @@
                    len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
-        dist = na.minimum(
-             na.sum((self.brick_left_edges - back_point) * vec, axis=1),
-             na.sum((self.brick_right_edges - back_point) * vec, axis=1))
-        ind = na.argsort(dist)
+        dist = np.minimum(
+             np.sum((self.brick_left_edges - back_point) * vec, axis=1),
+             np.sum((self.brick_right_edges - back_point) * vec, axis=1))
+        ind = np.argsort(dist)
         for b in self.bricks[ind]:
             #print b.LeftEdge, b.RightEdge
             yield b
@@ -79,7 +79,7 @@
         for field, log_field in zip(self.fields, self.log_fields):
             vcd = grid.get_vertex_centered_data(field, no_ghost = self.no_ghost)
             vcd = vcd.astype("float64")
-            if log_field: vcd = na.log10(vcd)
+            if log_field: vcd = np.log10(vcd)
             vcds.append(vcd)
 
         GF = GridFaces(grid.Children + [grid])
@@ -121,11 +121,11 @@
         # intersection, we only need to do the left edge & right edge.
         #
         # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.bricks = na.empty(len(bricks), dtype='object')
+        self.brick_left_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = np.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = np.zeros( NB, dtype='int64')
+        self.brick_dimensions = np.zeros( (NB, 3), dtype='int64')
+        self.bricks = np.empty(len(bricks), dtype='object')
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
             self.brick_right_edges[i,:] = b.RightEdge
@@ -143,12 +143,12 @@
             for j in [-1, 1]:
                 for k in [-1, 1]:
                     for b in self.bricks:
-                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
-                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        BB = np.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = np.min(BB, axis=0), np.max(BB, axis=0)
                         nb.append(
                             PartitionedGrid(b.parent_grid_id, len(b.my_data), 
                                 [md[::i,::j,::k].copy("C") for md in b.my_data],
-                                LE, RE, na.array(b.my_data[0].shape) - 1))
+                                LE, RE, np.array(b.my_data[0].shape) - 1))
         # Replace old bricks
         self.initialize_bricks(nb)
 
@@ -183,7 +183,7 @@
                                 self.brick_right_edges[i,:],
                                 self.brick_dimensions[i,:],
                                 ))
-        self.bricks = na.array(bricks, dtype='object')
+        self.bricks = np.array(bricks, dtype='object')
         f.close()
 
     def reset_cast(self):
@@ -194,10 +194,10 @@
     def __init__(self, data_array):
         self.bricks = [PartitionedGrid(-1, 1, 
                        [data_array.astype("float64")],
-                       na.zeros(3, dtype='float64'),
-                       na.ones(3, dtype='float64'),
-                       na.array(data_array.shape, dtype='int64')-1)]
-        self.brick_dimensions = na.ones((1, 3), dtype='int64')*data_array.shape
+                       np.zeros(3, dtype='float64'),
+                       np.ones(3, dtype='float64'),
+                       np.array(data_array.shape, dtype='int64')-1)]
+        self.brick_dimensions = np.ones((1, 3), dtype='int64')*data_array.shape
 
     def initialize_source(self):
         pass
@@ -221,24 +221,24 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
+def export_partitioned_grids(grid_list, fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
+    left_edge = np.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
-    right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
+    right_edge = np.concatenate([[grid.RightEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/RightEdges", data=right_edge, dtype=float_type); del right_edge
-    dims = na.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
+    dims = np.concatenate([[grid.my_data.shape[:],] for grid in grid_list])
     f.create_dataset("/PGrids/Dims", data=dims, dtype=int_type); del dims
-    data = na.concatenate([grid.my_data.ravel() for grid in grid_list])
+    data = np.concatenate([grid.my_data.ravel() for grid in grid_list])
     f.create_dataset("/PGrids/Data", data=data, dtype=float_type); del data
     f.close()
     pbar.finish()
 
-def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
+def import_partitioned_grids(fn, int_type=np.int64, float_type=np.float64):
     f = h5py.File(fn, "r")
     n_groups = len(f)
     grid_list = []
@@ -258,4 +258,4 @@
         pbar.update(i)
     pbar.finish()
     f.close()
-    return na.array(grid_list, dtype='object')
+    return np.array(grid_list, dtype='object')
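
For context, the traversal order computed in the first hunk above
(back-to-front along the viewing vector) can be reproduced standalone; a
sketch with hypothetical brick edges:

    import numpy as np

    back_point = np.zeros(3)
    front_point = np.ones(3)
    vec = front_point - back_point

    brick_left_edges = np.random.random((8, 3)) * 0.5   # hypothetical boxes
    brick_right_edges = brick_left_edges + 0.25

    # distance of each brick's nearer edge along the view vector
    dist = np.minimum(
        np.sum((brick_left_edges - back_point) * vec, axis=1),
        np.sum((brick_right_edges - back_point) * vec, axis=1))
    order = np.argsort(dist)    # yield bricks in this order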


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -25,7 +25,7 @@
 import h5py
 try: import pyfits
 except: pass
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 
@@ -67,7 +67,7 @@
         f.close()
     else:
         print 'No support for fits import.'
-    return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
+    return np.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
 
 def plot_channel(image, name, cmap='gist_heat', log=True, dex=3, zero_factor=1.0e-10, 
                  label=None, label_color='w', label_size='large'):
@@ -84,7 +84,7 @@
     import matplotlib
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     ma = image[image>0.0].max()
     image[image==0.0] = ma*zero_factor
     if log:
@@ -113,7 +113,7 @@
     """
     import pylab
     Nvec = image.shape[0]
-    image[na.isnan(image)] = 0.0
+    image[np.isnan(image)] = 0.0
     if image.shape[2] >= 4:
         image = image[:,:,:3]
     pylab.clf()
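
The two isnan hunks above share one pattern worth spelling out: NaNs are
zeroed, and (in plot_channel) exact zeros are floored to a small fraction of
the maximum so that a subsequent log10 stays finite. A standalone sketch with
hypothetical data:

    import numpy as np

    zero_factor = 1.0e-10
    image = np.random.random((16, 16))
    image[0, 0] = np.nan
    image[1, 1] = 0.0

    image[np.isnan(image)] = 0.0
    ma = image[image > 0.0].max()
    image[image == 0.0] = ma * zero_factor
    log_image = np.log10(image)   # finite everywhere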


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -261,7 +261,7 @@
         tex_coord.Append((t1,t0,t1)); ver_coord.Append((x1, y0, z1)) # 7
         
         # Store quads
-        self._quads[tex_id] = (tex_coord, ver_coord, na.array(indices,dtype=na.uint8))
+        self._quads[tex_id] = (tex_coord, ver_coord, np.array(indices,dtype=np.uint8))
 
 def visvis_plot(vp):
     """
@@ -280,10 +280,10 @@
     ax = vv.gca()
 
     for i,g in enumerate(gs):
-        ss = ((g.RightEdge - g.LeftEdge) / (na.array(g.my_data[0].shape)-1)).tolist()
+        ss = ((g.RightEdge - g.LeftEdge) / (np.array(g.my_data[0].shape)-1)).tolist()
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
-        dd = na.clip(dd, 0.0, 1.0)
+        dd = np.clip(dd, 0.0, 1.0)
         print ss
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
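
A note on the normalization in visvis_plot above: each grid's data is
rescaled against the global min/max and clipped to [0, 1] before being handed
to visvis as a texture. A minimal sketch (bounds and data hypothetical):

    import numpy as np

    mi, ma = 0.0, 5.0                          # hypothetical global bounds
    data = np.random.random((8, 8, 8)) * 5.0   # one grid's data
    dd = (data.astype("float32") - mi) / (ma - mi)
    dd = np.clip(dd, 0.0, 1.0)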
 


diff -r cc17d1b6a2f976688ba07ab333e013becb85f097 -r 28ca834d247d54c73293472ed55c010f0a7fbe05 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from matplotlib.cm import get_cmap
 
 from yt.funcs import *
@@ -59,10 +59,10 @@
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
-        self.x = na.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
-        self.y = na.zeros(nbins, dtype='float64')
+        self.x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
+        self.y = np.zeros(nbins, dtype='float64')
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
@@ -88,8 +88,8 @@
         >>> tf = TransferFunction( (-10.0, -5.0) )
         >>> tf.add_gaussian(-9.0, 0.01, 1.0)
         """
-        vals = height * na.exp(-(self.x - location)**2.0/width)
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        vals = height * np.exp(-(self.x - location)**2.0/width)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_line(self, start, stop):
         r"""Add a line between two points to the transmission function.
@@ -122,7 +122,7 @@
         # not satisfy our bounding box arguments
         vals = slope * (self.x - x0) + y0
         vals[~((self.x >= x0) & (self.x <= x1))] = 0.0
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_step(self, start, stop, value):
         r"""Adds a step function to the transfer function.
@@ -154,12 +154,12 @@
         >>> tf.add_gaussian(-7.0, 0.01, 1.0)
         >>> tf.add_step(-8.0, -6.0, 0.5)
         """
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
-        self.y = na.clip(na.maximum(vals, self.y), 0.0, na.inf)
+        self.y = np.clip(np.maximum(vals, self.y), 0.0, np.inf)
 
     def add_filtered_planck(self, wavelength, trans):
-        vals = na.zeros(self.x.shape, 'float64')
+        vals = np.zeros(self.x.shape, 'float64')
         nu = clight/(wavelength*1e-8)
         nu = nu[::-1]
 
@@ -167,15 +167,15 @@
             T = 10**logT
             # Black body at this nu, T
             Bnu = ((2.0 * hcgs * nu**3) / clight**2.0) / \
-                    (na.exp(hcgs * nu / (kboltz * T)) - 1.0)
+                    (np.exp(hcgs * nu / (kboltz * T)) - 1.0)
             # transmission
             f = Bnu * trans[::-1]
             # integrate transmission over nu
-            vals[i] = na.trapz(f,nu)
+            vals[i] = np.trapz(f,nu)
 
         # normalize by total transmission over filter
-        self.y = vals/trans.sum() #/na.trapz(trans[::-1],nu)
-        #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+        self.y = vals/trans.sum() #/np.trapz(trans[::-1],nu)
+        #self.y = np.clip(np.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
         r"""Save an image file of the transfer function.
@@ -245,7 +245,7 @@
         self.field_table_ids = [0] * 6
         self.weight_table_ids = [-1] * 6
         self.grad_field = -1
-        self.light_source_v = self.light_source_c = na.zeros(3, 'float64')
+        self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
@@ -459,20 +459,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -500,20 +500,20 @@
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
         ax = pyplot.axes()
-        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
-        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
-        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
-        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        i_data = np.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = np.outer(np.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = np.outer(np.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = np.outer(np.ones(self.alpha.x.size), self.funcs[2].y)
         ax.imshow(i_data, origin='lower')
-        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.fill_between(np.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
         ax.set_xlim(0, self.alpha.x.size)
-        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
         ax.xaxis.set_ticks(xticks)
         def x_format(x, pos):
             return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
         ax.xaxis.set_major_formatter(FuncFormatter(x_format))
-        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        yticks = np.linspace(0,1,5) * self.alpha.y.size
         ax.yaxis.set_ticks(yticks)
         def y_format(y, pos):
             return (y / self.alpha.y.size)
@@ -574,7 +574,7 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
-        tomap = na.linspace(0.,1.,num=rel1-rel0)
+        tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)*scale
         if scale_func is None:
@@ -640,17 +640,17 @@
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
         if alpha is None and self.grey_opacity:
-            alpha = na.ones(N, dtype="float64")
+            alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
-            alpha = na.logspace(-3, 0, N)
-        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
+            alpha = np.logspace(-3, 0, N)
+        for v, a in zip(np.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
-        image = na.zeros((height, width, 3), dtype='uint8')
-        hvals = na.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
+        image = np.zeros((height, width, 3), dtype='uint8')
+        hvals = np.mgrid[self.x_bounds[0]:self.x_bounds[1]:height * 1j]
         for i,f in enumerate(self.funcs[:3]):
-            vals = na.interp(hvals, f.x, f.y)
+            vals = np.interp(hvals, f.x, f.y)
             image[:,:,i] = (vals[:,None] * 255).astype('uint8')
         image = image[::-1,:,:]
         return image
@@ -736,7 +736,7 @@
         self._normalize()
 
     def _normalize(self):
-        fmax  = na.array([f.y for f in self.tables[:3]])
+        fmax  = np.array([f.y for f in self.tables[:3]])
         normal = fmax.max(axis=0)
         for f in self.tables[:3]:
             f.y = f.y/normal
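
Since transfer_functions.py carries most of the math in this changeset, here
is the add_gaussian logic from the diff above as a standalone sketch, reusing
the bounds from its own docstring example:

    import numpy as np

    x_bounds, nbins = (-10.0, -5.0), 256
    x = np.linspace(x_bounds[0], x_bounds[1], nbins).astype('float64')
    y = np.zeros(nbins, dtype='float64')

    # equivalent of add_gaussian(location=-9.0, width=0.01, height=1.0)
    location, width, height = -9.0, 0.01, 1.0
    vals = height * np.exp(-(x - location)**2.0 / width)
    y = np.clip(np.maximum(vals, y), 0.0, np.inf)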



https://bitbucket.org/yt_analysis/yt/changeset/d2daf9466108/
changeset:   d2daf9466108
branch:      yt
user:        ngoldbaum
date:        2012-09-20 20:06:20
summary:     Don't need to modify grid_patch.py after all; as long as the grid edges are set correctly in the frontend, everything should work.
affected #:  1 file

diff -r 28ca834d247d54c73293472ed55c010f0a7fbe05 -r d2daf94661086662a9efe6139fe88557b71bbff3 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -210,8 +210,6 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE - LE) / self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
-        if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
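
For context, a sketch of the dds computation that survives this change (edge
values hypothetical): with the per-dimensionality overrides removed, cell
widths follow directly from the grid edges, so a frontend loading, say, 2D
data must set the unused edge pair to span the domain itself:

    import numpy as np

    LE = np.array([0.25, 0.25, 0.0])      # hypothetical grid left edge
    RE = np.array([0.50, 0.50, 1.0])      # z edges span the whole domain
    ActiveDimensions = np.array([64, 64, 1])

    dds = np.array((RE - LE) / ActiveDimensions)
    # dds == [0.00390625, 0.00390625, 1.0]; dds[2] is the domain width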



https://bitbucket.org/yt_analysis/yt/changeset/0afe4bd46a1a/
changeset:   0afe4bd46a1a
branch:      yt
user:        MatthewTurk
date:        2012-09-20 20:15:49
summary:     Merged in ngoldbaum/yt-cleancopy (pull request #274)
affected #:  3 files



diff -r b30f700ac72bcaa6d1e6145499d5e8a13c747e06 -r 0afe4bd46a1acc7653831364d4f541eadb45d530 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -148,8 +148,8 @@
 
         for i in xrange(self.num_grids):
             dx = dxs[self.grid_levels[i],:]
-            self.grid_left_edge[i] = np.rint(self.grid_left_edge[i]/dx)*dx
-            self.grid_right_edge[i] = np.rint(self.grid_right_edge[i]/dx)*dx
+            self.grid_left_edge[i][:ND] = np.rint(self.grid_left_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
+            self.grid_right_edge[i][:ND] = np.rint(self.grid_right_edge[i][:ND]/dx[0][:ND])*dx[0][:ND]
                         
     def _populate_grid_objects(self):
         # We only handle 3D data, so offset is 7 (nfaces+1)
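
The rounding above snaps each edge to the nearest multiple of the cell width,
and the [:ND] slicing confines it to the dataset's real dimensions. A small
numeric sketch (values hypothetical):

    import numpy as np

    ND = 2                                       # dataset dimensionality
    dx = np.array([0.125, 0.125, 1.0])           # cell widths on this level
    left_edge = np.array([0.2501, 0.4999, 0.0])

    # snap only the first ND components to the nearest multiple of dx
    left_edge[:ND] = np.rint(left_edge[:ND] / dx[:ND]) * dx[:ND]
    # left_edge -> [0.25, 0.5, 0.0]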

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


