[yt-svn] commit/yt: 5 new changesets

commits-noreply at bitbucket.org
Wed Jul 2 00:27:37 PDT 2014


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/30dd0b288f61/
Changeset:   30dd0b288f61
Branch:      yt-3.0
User:        hegan
Date:        2014-06-27 20:06:23
Summary:     Make annotate particles work with ptype
Affected #:  1 file

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -958,7 +958,7 @@
     region = None
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
-                 ptype=None, stars_only=False, dm_only=False,
+                 ptype='all', stars_only=False, dm_only=False,
                  minimum_mass=None, alpha=1.0):
         PlotCallback.__init__(self)
         self.width = width
@@ -988,11 +988,12 @@
         axis_names = plot.data.pf.coordinates.axis_name
         field_x = "particle_position_%s" % axis_names[xax]
         field_y = "particle_position_%s" % axis_names[yax]
-        gg = ( ( reg[field_x] >= x0 ) & ( reg[field_x] <= x1 )
-           &   ( reg[field_y] >= y0 ) & ( reg[field_y] <= y1 ) )
-        if self.ptype is not None:
-            gg &= (reg["particle_type"] == self.ptype)
-            if gg.sum() == 0: return
+        pt = self.ptype
+        gg = ( ( reg[pt, field_x] >= x0 ) & ( reg[pt, field_x] <= x1 )
+           &   ( reg[pt, field_y] >= y0 ) & ( reg[pt, field_y] <= y1 ) )
+        #if self.ptype is not None:
+        #    gg &= (reg["particle_type"] == self.ptype)
+        #    if gg.sum() == 0: return
         if self.stars_only:
             gg &= (reg["creation_time"] > 0.0)
             if gg.sum() == 0: return
@@ -1004,8 +1005,8 @@
             if gg.sum() == 0: return
         plot._axes.hold(True)
         px, py = self.convert_to_plot(plot,
-                    [np.array(reg[field_x][gg][::self.stride]),
-                     np.array(reg[field_y][gg][::self.stride])])
+                    [np.array(reg[pt, field_x][gg][::self.stride]),
+                     np.array(reg[pt, field_y][gg][::self.stride])])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
                            s=self.p_size, c=self.color,alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)

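For reference, a minimal sketch of the new usage (the dataset path is
illustrative, and the callback is assumed to be exposed on plots as
``annotate_particles``):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = yt.ProjectionPlot(ds, "z", "density", width=(10, "Mpc"))
    # ptype now defaults to 'all'; pass a specific particle type name
    # to restrict which particles are drawn.
    p.annotate_particles((10, "Mpc"), p_size=2.0, ptype="all")
    p.save()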

https://bitbucket.org/yt_analysis/yt/commits/0dd048ef1c8f/
Changeset:   0dd048ef1c8f
Branch:      yt-3.0
User:        hegan
Date:        2014-06-30 16:59:16
Summary:     merging
Affected #:  39 files

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -57,3 +57,12 @@
 serial the operation ``for pf in ts:`` would also have worked identically.
 
 .. yt_cookbook:: time_series.py
+
+Complex Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe estimates the ratio of gravitational and pressure forces in a galaxy
+cluster simulation.  This shows how to create and work with vector derived 
+fields.
+
+.. yt_cookbook:: hse_field.py

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -36,7 +36,7 @@
 axes.  To focus on what's happening in the x-y plane, we make an additional
 Temperature slice for the bottom-right subpanel.
 
-.. yt-cookbook:: multiplot_2x2_coordaxes_slice.py
+.. yt_cookbook:: multiplot_2x2_coordaxes_slice.py
 
 Multi-Plot Slice and Projections
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/fits_xray_images.rst
--- a/doc/source/cookbook/fits_xray_images.rst
+++ b/doc/source/cookbook/fits_xray_images.rst
@@ -1,6 +1,6 @@
 .. _xray_fits:
 
 FITS X-ray Images in yt
-----------------------
+-----------------------
 
-.. notebook:: fits_xray_images.ipynb
\ No newline at end of file
+.. notebook:: fits_xray_images.ipynb

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -7,8 +7,10 @@
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
 
-@yt.derived_field(name='grav_accel_x', units='cm/s**2', take_log=False)
-def grav_accel_x(field, data):
+@yt.derived_field(name='gravitational_acceleration_x',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_x(field, data):
 
     # We need to set up stencils
 
@@ -16,20 +18,22 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'][0]
 
     gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
     gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gx.uq
     new_field[1:-1, 1:-1, 1:-1] = -gx
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_y', units='cm/s**2', take_log=False)
-def grav_accel_y(field, data):
+@yt.derived_field(name='gravitational_acceleration_y',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_y(field, data):
 
     # We need to set up stencils
 
@@ -37,20 +41,23 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
     gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gy.uq
+
     new_field[1:-1, 1:-1, 1:-1] = -gy
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_z', units='cm/s**2', take_log=False)
-def grav_accel_z(field, data):
+@yt.derived_field(name='gravitational_acceleration_z',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_z(field, data):
 
     # We need to set up stencils
 
@@ -58,13 +65,13 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
     gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gz.uq
     new_field[1:-1, 1:-1, 1:-1] = -gz
 
     return new_field
@@ -73,7 +80,8 @@
 # Define the components of the pressure gradient field
 
 
-@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_x(field, data):
 
     # We need to set up stencils
@@ -82,18 +90,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'].flatten()[0]
 
     px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
     px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.uq
     new_field[1:-1, 1:-1, 1:-1] = px
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_y(field, data):
 
     # We need to set up stencils
@@ -102,18 +111,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     py = data["pressure"][1:-1, sl_right, 1:-1]/dy
     py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*py.uq
     new_field[1:-1, 1:-1, 1:-1] = py
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_z(field, data):
 
     # We need to set up stencils
@@ -122,12 +132,12 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
     pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*pz.uq
     new_field[1:-1, 1:-1, 1:-1] = pz
 
     return new_field
@@ -135,49 +145,29 @@
 
 # Define the "degree of hydrostatic equilibrium" field
 
-@yt.derived_field(name='HSE', units=None, take_log=False)
+@yt.derived_field(name='HSE', units=None, take_log=False,
+                  display_name='Hydrostatic Equilibrium')
 def HSE(field, data):
 
-    gx = data["density"]*data["Grav_Accel_x"]
-    gy = data["density"]*data["Grav_Accel_y"]
-    gz = data["density"]*data["Grav_Accel_z"]
+    gx = data["density"]*data["gravitational_acceleration_x"]
+    gy = data["density"]*data["gravitational_acceleration_y"]
+    gz = data["density"]*data["gravitational_acceleration_z"]
 
-    hx = data["Grad_Pressure_x"] - gx
-    hy = data["Grad_Pressure_y"] - gy
-    hz = data["Grad_Pressure_z"] - gz
+    hx = data["grad_pressure_x"] - gx
+    hy = data["grad_pressure_y"] - gy
+    hz = data["grad_pressure_z"] - gz
 
-    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))*gx.unit_array
+    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
 
     return h
 
 
-# Open two files, one at the beginning and the other at a later time when
-# there's a lot of sloshing going on.
+# Open a dataset from when there's a lot of sloshing going on.
 
-dsi = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
-dsf = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
+ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-# Sphere objects centered at the cluster potential minimum with a radius
-# of 200 kpc
 
-sphere_i = dsi.sphere(dsi.domain_center, (200, "kpc"))
-sphere_f = dsf.sphere(dsf.domain_center, (200, "kpc"))
+# Take a slice through the center of the domain
+slc = yt.SlicePlot(ds, 2, ["density", "HSE"], width=(1, 'Mpc'))
 
-# Average "degree of hydrostatic equilibrium" in these spheres
-
-hse_i = sphere_i.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-hse_f = sphere_f.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-
-print "Degree of hydrostatic equilibrium initially: ", hse_i
-print "Degree of hydrostatic equilibrium later: ", hse_f
-
-# Just for good measure, take slices through the center of the domains
-# of the two files
-
-slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
-                     width=(1.0, "Mpc"))
-slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
-                     width=(1.0, "Mpc"))
-
-slc_i.save("initial")
-slc_f.save("final")
+slc.save("hse")

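The ``ValidateSpatial`` validators added above request one layer of ghost
zones so the finite-difference stencils remain valid at grid boundaries. A
condensed sketch of the pattern used by the recipe (field names follow the
diff; this is illustrative, not a drop-in replacement):

    import numpy as np
    import yt

    @yt.derived_field(name='gravitational_acceleration_x',
                      units='cm/s**2', take_log=False,
                      validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
    def _gx(field, data):
        # Central difference along x; the ghost zones keep the shifted
        # slices inside valid data.
        sl_left, sl_right = slice(None, -2), slice(2, None)
        dx = 2.0 * data['dx'].flatten()[0]
        gx = (data["gravitational_potential"][sl_right, 1:-1, 1:-1] -
              data["gravitational_potential"][sl_left, 1:-1, 1:-1]) / dx
        new_field = np.zeros(data["gravitational_potential"].shape,
                             dtype='float64') * gx.uq
        new_field[1:-1, 1:-1, 1:-1] = -gx
        return new_field
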
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -16,17 +16,15 @@
 
 # Create a 1D profile object for profiles over radius
 # and add a velocity profile.
-prof = yt.ProfilePlot(sp, 'radius', 'velocity_magnitude', 
-                      weight_field='cell_mass')
-prof.set_unit('radius', 'kpc')
-prof.set_xlim(0.1, 1000)
+prof = yt.create_profile(sp, 'radius', 'velocity_magnitude',
+                         units = {'radius': 'kpc'},
+                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
+                         weight_field='cell_mass')
 
 # Plot the average velocity magnitude.
-plt.loglog(prof['radius'], prof['velocity_magnitude'],
-              label='Mean')
+plt.loglog(prof.x, prof['velocity_magnitude'], label='Mean')
 # Plot the variance of the velocity magnitude.
-plt.loglog(prof['radius'], prof['velocity_magnitude_std'],
-              label='Standard Deviation')
+plt.loglog(prof.x, prof['velocity_magnitude_std'], label='Standard Deviation')
 plt.xlabel('r [kpc]')
 plt.ylabel('v [cm/s]')
 plt.legend()

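Several recipes in this changeset migrate from ``ProfilePlot`` plus setter
calls to ``yt.create_profile``, which takes the binning configuration as
keyword dictionaries up front and exposes bin centers as ``prof.x``. The
pattern in isolation (sphere parameters are illustrative):

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
    sp = ds.sphere("c", (500., "kpc"))
    prof = yt.create_profile(sp, 'radius', 'density',
                             units={'radius': 'kpc'},
                             logs={'radius': False},
                             weight_field='cell_mass')
    # prof.x gives the radius bin centers; prof['density'] the binned field.
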
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -21,18 +21,15 @@
 
 # Radial profile without correction
 
-rp0 = yt.ProfilePlot(sp0, 'radius', 'radial_velocity')
-rp0.set_unit('radius', 'kpc')
-rp0.set_log('radius', False)
+rp0 = yt.create_profile(sp0, 'radius', 'radial_velocity',
+        units = {'radius': 'kpc'},
+        logs = {'radius': False})
 
 # Radial profile with correction for bulk velocity
 
-rp1 = yt.ProfilePlot(sp1, 'radius', 'radial_velocity')
-rp1.set_unit('radius', 'kpc')
-rp1.set_log('radius', False)
-
-#rp0.save('radial_velocity_profile_uncorrected.png')
-#rp1.save('radial_velocity_profile_corrected.png')
+rp1 = yt.create_profile(sp1, 'radius', 'radial_velocity',
+        units = {'radius': 'kpc'},
+        logs = {'radius': False})
 
 # Make a plot using matplotlib
 
@@ -40,8 +37,8 @@
 ax = fig.add_subplot(111)
 
 # Here we scale the velocities by 1.0e5 to get into km/s
-ax.plot(rad_profile0["Radiuskpc"], rad_profile0["RadialVelocity"]/1.0e5,
-		rad_profile1["Radiuskpc"], rad_profile1["RadialVelocity"]/1.0e5)
+ax.plot(rp0.x, rp0["radial_velocity"].in_units("km/s"),
+		rp1.x, rp1["radial_velocity"].in_units("km/s"))
 
 ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
 ax.set_ylabel(r"$\mathrm{v_r\ (km/s)}$")

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -12,11 +12,9 @@
 
 # Bin up the data from the sphere into a radial profile
 
-#rp = BinnedProfile1D(sphere, 100, "Radiuskpc", 0.0, 500., log_space=False)
-#rp.add_fields("density","temperature")
-rp = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'])
-rp.set_unit('radius', 'kpc')
-rp.set_log('radius', False)
+rp = yt.create_profile(sp, 'radius', ['density', 'temperature'],
+                       units = {'radius': 'kpc'},
+                       logs = {'radius': False})
 
 # Make plots using matplotlib
 
@@ -24,7 +22,7 @@
 ax = fig.add_subplot(111)
 
 # Plot the density as a log-log plot using the default settings
-dens_plot = ax.loglog(rp["Radiuskpc"], rp["density"])
+dens_plot = ax.loglog(rp.x, rp["density"])
 
 # Here we set the labels of the plot axes
 
@@ -52,15 +50,3 @@
 dens_plot[0].set_markersize(10)
 
 fig.savefig("density_profile_thick_with_xs.png")
-
-# Now get rid of the line on the axes plot
-
-ax.lines = []
-
-# Since the radial profile object also includes the standard deviation in each bin,
-# we'll use these as errorbars. We have to make a new plot for this:
-
-dens_err_plot = ax.errorbar(pr["Radiuskpc"], rp["density"],
-                            yerr=rp["Density_std"])
-                                                        
-fig.savefig("density_profile_with_errorbars.png")

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ /dev/null
@@ -1,72 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-import matplotlib.pyplot as plt
-import h5py as h5
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-# Get a sphere
-
-sp = ds.sphere(ds.domain_center, (500., "kpc"))
-
-# Radial profile from the sphere
-
-prof = yt.BinnedProfile1D(sp, 100, "Radiuskpc", 0.0, 500., log_space=False)
-prof = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'], weight_field="cell_mass")
-prof.set_unit('radius', 'kpc')
-prof.set_log('radius', False)
-prof.set_xlim(0, 500)
-
-# Write profiles to ASCII file
-
-prof.write_out("%s_profile.dat" % ds, bin_style="center")
-
-# Write profiles to HDF5 file
-
-prof.write_out_h5("%s_profile.h5" % ds, bin_style="center")
-
-# Now we will show how using NumPy, h5py, and Matplotlib the data in these
-# files may be plotted.
-
-# Plot density from ASCII file
-
-# Open the text file using NumPy's "loadtxt" method. In order to get the 
-# separate columns into separate NumPy arrays, it is essential to set unpack=True.
-
-r, dens, std_dens, temp, std_temp = \
-	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
-
-fig1 = plt.figure()
-
-ax = fig1.add_subplot(111)
-ax.plot(r, dens)
-ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
-ax.set_ylabel(r"$\mathrm{\rho\ (g\ cm^{-3})}$")
-ax.set_title("Density vs. Radius")
-fig1.savefig("%s_dens.png" % ds)
-
-# Plot temperature from HDF5 file
-
-# Get the file handle
-
-f = h5py.File("%s_profile.h5" % ds, "r")
-
-# Get the radius and temperature arrays from the file handle
-
-r = f["/Radiuskpc-1d"].attrs["x-axis-Radiuskpc"][:]
-temp = f["/Radiuskpc-1d/temperature"][:]
-
-# Close the file handle
-
-f.close()
-
-fig2 = plt.figure()
-
-ax = fig2.add_subplot(111)
-ax.plot(r, temp)
-ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
-ax.set_ylabel(r"$\mathrm{T\ (K)}$")
-ax.set_title("temperature vs. Radius")
-fig2.savefig("%s_temp.png" % ds)

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/simple_profile.py
--- a/doc/source/cookbook/simple_profile.py
+++ b/doc/source/cookbook/simple_profile.py
@@ -12,6 +12,7 @@
 sphere = ds.sphere("c", (100., "kpc"))
 plot = yt.ProfilePlot(sphere, "density", ["temperature", "velocity_x"],
                       weight_field="cell_mass")
+plot.set_log("velocity_x", False)
 
 # Save the image.
 # Optionally, give a string as an argument

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -7,5 +7,5 @@
 ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Create density slices of several fields along the x axis
-yt.SlicePlot(ds, 'x', ['density','temperature','pressure','vorticity_squared'], 
+yt.SlicePlot(ds, 'x', ['density','temperature','pressure'], 
              width = (800.0, 'kpc')).save()

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -51,7 +51,7 @@
 
 If you are developing new functionality, it is sometimes more convenient to use
 the Nose command line interface, ``nosetests``. You can run the unit tests
-using `no`qsetets` by navigating to the base directory of the yt mercurial
+using ``nose`` by navigating to the base directory of the yt mercurial
 repository and invoking ``nosetests``:
 
 .. code-block:: bash

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:cd145d8cadbf1a0065d0f9fb4ea107c215fcd53245b3bb7d29303af46f063552"
+  "signature": "sha256:5fc7783d6c99659c353a35348bb21210fcb7572d5357f32dd61755d4a7f8fe6c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -443,7 +443,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits.gz\")\n",
+      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
       "f.info()"
      ],
      "language": "python",
@@ -462,7 +462,7 @@
      "collapsed": false,
      "input": [
       "data = {}\n",
-      "for hdu in f[1:]:\n",
+      "for hdu in f:\n",
       "    name = hdu.name.lower()\n",
       "    data[name] = (hdu.data,\"km/s\")\n",
       "print data.keys()"

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -711,11 +711,13 @@
 ``spectral_factor``
 ~~~~~~~~~~~~~~~~~~~
 
-Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
-scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
-or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
- axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
- or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt
+sets the pixel scale as the ``code_length``, certain visualizations (such as
+volume renderings) may look extended or distended in ways that are
+undesirable. To adjust the width in ``code_length`` of the spectral axis, set
+``spectral_factor`` equal to a constant which gives the desired scaling, or set
+it to ``"auto"`` to make the width the same as the largest axis in the sky
+plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -792,11 +794,11 @@
 PyNE Data
 ---------
 
-.. _loading-numpy-array:
-
 Generic Array Data
 ------------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 Even if your data is not strictly related to fields commonly used in
 astrophysical codes or your code is not supported yet, you can still feed it to
 ``yt`` to use its advanced visualization and analysis facilities. The only
@@ -848,6 +850,8 @@
 Generic AMR Data
 ----------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 It is possible to create a native ``yt`` parameter file from a Python
 dictionary that describes a set of rectangular patches of data of possibly
 varying resolution. 

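As a quick illustration of the generic array loading these sections describe
(assuming the ``yt.load_uniform_grid`` entry point covered in
:ref:`loading-numpy-array`; the field name and units are made up):

    import numpy as np
    import yt

    # A 64^3 box of random densities, tagged with units.
    arr = np.random.random((64, 64, 64))
    data = dict(density=(arr, "g/cm**3"))
    ds = yt.load_uniform_grid(data, arr.shape, length_unit="Mpc")
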
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,6 +120,8 @@
 .. python-script::
    
    from yt.mods import *
+   from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+
    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/visualizing/_images/mapserver.png
Binary file doc/source/visualizing/_images/mapserver.png has changed

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -478,8 +478,7 @@
     :ref:`cookbook-amrkdtree_to_uniformgrid`.
 
 System Requirements
--------------------
-.. versionadded:: 3.0
++++++++++++++++++++
 
 Nvidia graphics card - The memory limit of the graphics card sets the limit
                        on the size of the data source.
@@ -490,7 +489,7 @@
 the common/inc samples shipped with CUDA. The following shows an example
 in bash with CUDA 5.5 installed in /usr/local :
 
-export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
+    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
 
 PyCUDA must also be installed to use Theia. 
 
@@ -503,13 +502,13 @@
 
 
 Tutorial
---------
-.. versionadded:: 3.0
+++++++++
 
 Currently rendering only works on uniform grids. Here is an example
 on a 1024 cube of float32 scalars.
 
 .. code-block:: python
+
    from yt.visualization.volume_rendering.theia.scene import TheiaScene
    from yt.visualization.volume_rendering.algorithms.front_to_back import FrontToBackRaycaster
    import numpy as np
@@ -528,28 +527,27 @@
 .. _the-theiascene-interface:
 
 The TheiaScene Interface
---------------------
-.. versionadded:: 3.0
+++++++++++++++++++++++++
 
 A TheiaScene object has been created to provide a high level entry point for
-controlling the raycaster's view onto the data. The class  
-:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates
- a Camera object and a TheiaSource that intern encapsulates
-a volume. The :class:`~yt.visualization.volume_rendering.theia.Camera`
-provides controls for rotating, translating, and zooming into the volume.
-Using the :class:`~yt.visualization.volume_rendering.theia.TheiaSource`
-automatically transfers the volume to the graphic's card texture memory.
+controlling the raycaster's view onto the data. The class
+:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates a
+Camera object and a TheiaSource that in turn encapsulates a volume. The
+:class:`~yt.visualization.volume_rendering.theia.Camera` provides controls for
+rotating, translating, and zooming into the volume.  Using the
+:class:`~yt.visualization.volume_rendering.theia.TheiaSource` automatically
+transfers the volume to the graphics card texture memory.
 
 Example Cookbooks
----------------
++++++++++++++++++
 
 OpenGL Example for interactive volume rendering:
 :ref:`cookbook-opengl_volume_rendering`.
 
-OpenGL Stereoscopic Example :
 .. warning::  Frame rate will suffer significantly from stereoscopic rendering.
               ~2x slower since the volume must be rendered twice.
-:ref:`cookbook-opengl_stereo_volume_rendering`.
+
+OpenGL Stereoscopic Example: :ref:`cookbook-opengl_stereo_volume_rendering`.
 
 Pseudo-Realtime video rendering with ffmpeg :
 :ref:`cookbook-ffmpeg_volume_rendering`.

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -201,7 +201,8 @@
             if self.suppress_logging:
                 old_level = int(ytcfg.get("yt","loglevel"))
                 mylog.setLevel(40)
-            dd_first = self.data_series[0].all_data()
+            ds_first = self.data_series[0]
+            dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -156,7 +156,8 @@
 
     def _create_intensity(self, i):
         def _intensity(field, data):
-            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            vlos = data["v_los"]
+            w = np.abs(vlos-self.vmid[i])/self.dv.in_units(vlos.units)
             w = 1.-w
             w[w < 0.0] = 0.0
             return data[self.field]*w

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -811,6 +811,8 @@
         blank = ~temp_storage.used
         self.used = temp_storage.used
         if self.weight_field is not None:
+            # This is unnecessary, but it will suppress division errors.
+            temp_storage.weight_values[blank] = 1e-30
             temp_storage.values /= temp_storage.weight_values[...,None]
             self.weight = temp_storage.weight_values[...,None]
             self.weight[blank] = 0.0

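The new guard sets empty-bin weights to a tiny value before the division and
zeroes them afterwards, so NumPy never sees 0/0. The same trick in isolation:

    import numpy as np

    values = np.random.random((4, 1))
    weights = np.array([1.0, 0.0, 2.0, 0.0])
    blank = weights == 0.0
    weights[blank] = 1e-30          # suppress divide-by-zero warnings
    values /= weights[..., None]
    values[blank] = 0.0             # empty bins stay exactly zero
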
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,3 +717,6 @@
 
     def _calculate_offsets(self, fields):
         pass
+
+    def __cmp__(self, other):
+        return cmp(self.filename, other.filename)

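``__cmp__`` gives datasets a stable ordering by filename, which is what makes
calls like ``sorted(...)`` over unordered sets deterministic (the same
motivation as the ``sorted(data_files)`` changes in the io modules below). A
self-contained Python 2 sketch of the idea, with an illustrative class:

    class Output(object):
        def __init__(self, filename):
            self.filename = filename
        def __cmp__(self, other):
            # Same pattern as the static_output.py change above.
            return cmp(self.filename, other.filename)

    outputs = set([Output("plt_0350"), Output("plt_0000")])
    names = [o.filename for o in sorted(outputs)]
    # names is always ['plt_0000', 'plt_0350'], regardless of set order
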
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -45,28 +45,25 @@
     def _specific_angular_momentum_x(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return yv * rv[...,2] - zv * rv[...,1]
 
     def _specific_angular_momentum_y(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return - (xv * rv[...,2] - zv * rv[...,0])
 
     def _specific_angular_momentum_z(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return xv * rv[...,1] - yv * rv[...,0]
 
     registry.add_field((ftype, "specific_angular_momentum_x"),

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -49,7 +49,7 @@
 
 class EnzoFieldInfo(FieldInfoContainer):
     known_other_fields = (
-        ("Cooling_Time", ("code_time", ["cooling_time"], None)),
+        ("Cooling_Time", ("s", ["cooling_time"], None)),
         ("HI_kph", ("1/code_time", [], None)),
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -283,7 +283,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn)) 
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         if self._flash_version == 7:
             for hn in hns:
@@ -300,7 +301,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         
         # Determine block size
@@ -363,7 +365,7 @@
         try:
             self.gamma = self.parameters["gamma"]
         except:
-            mylog.warning("Cannot find Gamma")
+            mylog.info("Cannot find Gamma")
             pass
 
         # Get the simulation time

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -43,7 +43,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 x = f['particle_position_x'].value.astype("float64")
@@ -61,7 +61,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/halo_catalogs/owls_subfind/io.py
--- a/yt/frontends/halo_catalogs/owls_subfind/io.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -44,7 +44,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -78,7 +78,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -45,7 +45,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -65,7 +65,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -48,7 +48,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             yield "dark_matter", (
                 self._handle['x'], self._handle['y'], self._handle['z'])
@@ -62,7 +62,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             for ptype, field_list in sorted(ptf.items()):
                 x = self._handle['x']

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -77,7 +77,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -93,7 +93,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 g = f["/%s" % ptype]
@@ -251,7 +251,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -268,7 +268,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -498,7 +498,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -519,7 +519,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -725,7 +725,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype in ptf:
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")
@@ -738,7 +738,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype, field_list in sorted(ptf.items()):
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -108,7 +108,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -121,7 +121,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
                 x, y, z = (f[ptype, "particle_position_%s" % ax]

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -60,7 +60,10 @@
     convert scalar, list or tuple argument passed to functions using Cython.
     """
     if isinstance(obj, np.ndarray):
-        return obj
+        if obj.shape == ():
+            return np.array([obj])
+        # We cast to ndarray to catch ndarray subclasses
+        return np.array(obj)
     elif isinstance(obj, (types.ListType, types.TupleType)):
         return np.asarray(obj)
     else:

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -241,6 +241,40 @@
         ind = pts.find_points_in_tree()
         return self.grids[ind], ind
 
+    def find_field_value_at_point(self, fields, coord):
+        r"""Find the value of fields at a coordinate.
+
+        Returns the values [field1, field2,...] of the fields at the given
+        (x, y, z) point. Returns a list of field values in the same order as
+        the input *fields*.
+
+        Parameters
+        ----------
+        fields : string or list of strings
+            The field(s) that will be returned.
+
+        coord : list or array of coordinates
+            The location for which field values will be returned.
+
+        Examples
+        --------
+        >>> pf.h.find_field_value_at_point(['Density', 'Temperature'],
+        ...     [0.4, 0.3, 0.8])
+        [2.1489e-24, 1.23843e4]
+        """
+        this = self.find_points(*coord)[0][-1]
+        cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
+        mark = np.zeros(3).astype('int')
+        # Find the index for the cell containing this point.
+        for dim in xrange(len(coord)):
+            mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
+        out = []
+        fields = ensure_list(fields)
+        # Pull out the values and add it to the out list.
+        for field in fields:
+            out.append(this[field][mark[0], mark[1], mark[2]])
+        return out
+
     def get_grid_tree(self) :
 
         left_edge = self.pf.arr(np.zeros((self.num_grids, 3)),

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 import numpy as np
 from selection_routines cimport SelectorObject
+from libc.math cimport floor
 cimport selection_routines
 
 ORDER_MAX = 20
@@ -278,7 +279,7 @@
         cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            ind[i] = <np.int64_t> (floor((ppos[i] - self.DLE[i])/dds[i]))
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
             ind32[i] = ind[i]

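The switch from a bare C cast to ``floor`` matters for positions just below
the domain left edge: casting truncates toward zero, so a slightly negative
quotient would land in cell 0 instead of cell -1. In Python terms:

    import math

    q = -0.3
    int(q)               # 0  : truncation toward zero
    int(math.floor(q))   # -1 : the index the oct lookup needs
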
diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -756,7 +756,6 @@
     yield assert_array_equal, yt_arr, YTArray(yt_arr.to_astropy())
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
-
 def test_subclass():
 
     class YTASubclass(YTArray):

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -501,7 +501,7 @@
         streamplot_args = {'x': X, 'y': Y, 'u':pixX, 'v': pixY,
                            'density': self.dens}
         streamplot_args.update(self.plot_args)
-        plot._axes.streamplot(**self.streamplot_args)
+        plot._axes.streamplot(**streamplot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -317,10 +317,10 @@
         return ret
 
     def _setup_plots(self):
+        for f in self.axes:
+            self.axes[f].cla()
         for i, profile in enumerate(self.profiles):
             for field, field_data in profile.items():
-                if field in self.axes:
-                    self.axes[field].cla()
                 self.axes[field].plot(np.array(profile.x), np.array(field_data),
                                       label=self.label[i], **self.plot_spec[i])
         

diff -r 30dd0b288f618fdf1cd46322e8e6fae17b3a8fa9 -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -581,7 +581,8 @@
         return image
 
     def _render(self, double_check, num_threads, image, sampler):
-        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        ncells = sum(b.source_mask.size for b in self.volume.bricks)
+        pbar = get_pbar("Ray casting", ncells)
         total_cells = 0
         if double_check:
             for brick in self.volume.bricks:
@@ -592,7 +593,7 @@
         view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
-            total_cells += np.prod(brick.my_data[0].shape)
+            total_cells += brick.source_mask.size
             pbar.update(total_cells)
 
         pbar.finish()


https://bitbucket.org/yt_analysis/yt/commits/17df8b2ec18a/
Changeset:   17df8b2ec18a
Branch:      yt-3.0
User:        hegan
Date:        2014-06-30 17:38:22
Summary:     cleaned up modification and removed redundant kwargs
Affected #:  1 file

diff -r 0dd048ef1c8f9ce260bea96eaa3af9a28c45edcb -r 17df8b2ec18a27a7560a4627847ebfda3c1873c8 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -958,8 +958,7 @@
     region = None
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
-                 ptype='all', stars_only=False, dm_only=False,
-                 minimum_mass=None, alpha=1.0):
+                 ptype='all', minimum_mass=None, alpha=1.0):
         PlotCallback.__init__(self)
         self.width = width
         self.p_size = p_size
@@ -967,8 +966,6 @@
         self.marker = marker
         self.stride = stride
         self.ptype = ptype
-        self.stars_only = stars_only
-        self.dm_only = dm_only
         self.minimum_mass = minimum_mass
         self.alpha = alpha
 
@@ -991,15 +988,6 @@
         pt = self.ptype
         gg = ( ( reg[pt, field_x] >= x0 ) & ( reg[pt, field_x] <= x1 )
            &   ( reg[pt, field_y] >= y0 ) & ( reg[pt, field_y] <= y1 ) )
-        #if self.ptype is not None:
-        #    gg &= (reg["particle_type"] == self.ptype)
-        #    if gg.sum() == 0: return
-        if self.stars_only:
-            gg &= (reg["creation_time"] > 0.0)
-            if gg.sum() == 0: return
-        if self.dm_only:
-            gg &= (reg["creation_time"] <= 0.0)
-            if gg.sum() == 0: return
         if self.minimum_mass is not None:
             gg &= (reg["particle_mass"] >= self.minimum_mass)
             if gg.sum() == 0: return


https://bitbucket.org/yt_analysis/yt/commits/a84b70b77e07/
Changeset:   a84b70b77e07
Branch:      yt-3.0
User:        hegan
Date:        2014-06-30 17:47:27
Summary:     missed one reference to a field without a ptype
Affected #:  1 file

diff -r 17df8b2ec18a27a7560a4627847ebfda3c1873c8 -r a84b70b77e0749d7adfdcaaec400b1ada0482cec yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -989,7 +989,7 @@
         gg = ( ( reg[pt, field_x] >= x0 ) & ( reg[pt, field_x] <= x1 )
            &   ( reg[pt, field_y] >= y0 ) & ( reg[pt, field_y] <= y1 ) )
         if self.minimum_mass is not None:
-            gg &= (reg["particle_mass"] >= self.minimum_mass)
+            gg &= (reg[pt, "particle_mass"] >= self.minimum_mass)
             if gg.sum() == 0: return
         plot._axes.hold(True)
         px, py = self.convert_to_plot(plot,


https://bitbucket.org/yt_analysis/yt/commits/57ecc4cbdf24/
Changeset:   57ecc4cbdf24
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-02 09:27:28
Summary:     Merged in hegan/yt/yt-3.0 (pull request #986)

Make annotate particles work with ptype
Affected #:  1 file

diff -r 5403193d8a46d5f8c166dbaa259653f6437056e6 -r 57ecc4cbdf24f36a5d522c983612468861ffde35 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -958,8 +958,7 @@
     region = None
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
-                 ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None, alpha=1.0):
+                 ptype='all', minimum_mass=None, alpha=1.0):
         PlotCallback.__init__(self)
         self.width = width
         self.p_size = p_size
@@ -967,8 +966,6 @@
         self.marker = marker
         self.stride = stride
         self.ptype = ptype
-        self.stars_only = stars_only
-        self.dm_only = dm_only
         self.minimum_mass = minimum_mass
         self.alpha = alpha
 
@@ -988,24 +985,16 @@
         axis_names = plot.data.pf.coordinates.axis_name
         field_x = "particle_position_%s" % axis_names[xax]
         field_y = "particle_position_%s" % axis_names[yax]
-        gg = ( ( reg[field_x] >= x0 ) & ( reg[field_x] <= x1 )
-           &   ( reg[field_y] >= y0 ) & ( reg[field_y] <= y1 ) )
-        if self.ptype is not None:
-            gg &= (reg["particle_type"] == self.ptype)
-            if gg.sum() == 0: return
-        if self.stars_only:
-            gg &= (reg["creation_time"] > 0.0)
-            if gg.sum() == 0: return
-        if self.dm_only:
-            gg &= (reg["creation_time"] <= 0.0)
-            if gg.sum() == 0: return
+        pt = self.ptype
+        gg = ( ( reg[pt, field_x] >= x0 ) & ( reg[pt, field_x] <= x1 )
+           &   ( reg[pt, field_y] >= y0 ) & ( reg[pt, field_y] <= y1 ) )
         if self.minimum_mass is not None:
-            gg &= (reg["particle_mass"] >= self.minimum_mass)
+            gg &= (reg[pt, "particle_mass"] >= self.minimum_mass)
             if gg.sum() == 0: return
         plot._axes.hold(True)
         px, py = self.convert_to_plot(plot,
-                    [np.array(reg[field_x][gg][::self.stride]),
-                     np.array(reg[field_y][gg][::self.stride])])
+                    [np.array(reg[pt, field_x][gg][::self.stride]),
+                     np.array(reg[pt, field_y][gg][::self.stride])])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
                            s=self.p_size, c=self.color,alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


