[yt-svn] commit/yt: 7 new changesets

commits-noreply@bitbucket.org
Thu Jul 17 14:15:03 PDT 2014


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3bac46ab6074/
Changeset:   3bac46ab6074
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-03 07:45:21
Summary:     Adding Boltzmann's constant and Rankine units.
Affected #:  3 files

diff -r 57ecc4cbdf24f36a5d522c983612468861ffde35 -r 3bac46ab60743e9e07407b449eb88f1018396975 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -18,7 +18,8 @@
     mass_sun_grams, sec_per_year, sec_per_day, sec_per_hr, \
     sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
-    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams
+    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
+    boltzmann_constant_erg_per_K, rankine_per_kelvin
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -89,6 +90,9 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
+    "kB": (boltzmann_constant_erg_per_K,
+           dimensions.energy/dimensions.temperature),
+    "R": (rankine_per_kelvin, dimensions.temperature),
 
     # for AstroPy compatibility
     "solMass": (mass_sun_grams, dimensions.mass),

diff -r 57ecc4cbdf24f36a5d522c983612468861ffde35 -r 3bac46ab60743e9e07407b449eb88f1018396975 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -45,6 +45,7 @@
 mh = mp
 clight = speed_of_light_cgs
 kboltz = boltzmann_constant_cgs
+kb = kboltz
 hcgs = planck_constant_cgs
 sigma_thompson = cross_section_thompson_cgs
 Na = 1 / amu_cgs

diff -r 57ecc4cbdf24f36a5d522c983612468861ffde35 -r 3bac46ab60743e9e07407b449eb88f1018396975 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -75,6 +75,7 @@
 keV_per_K = 1.0 / K_per_keV
 keV_per_erg = 1.0 / erg_per_keV
 eV_per_erg = 1.0 / erg_per_eV
+rankine_per_kelvin = 9./5.
 
 # Solar System masses
 # Standish, E.M. (1995) "Report of the IAU WGAS Sub-Group on Numerical Standards",
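
For orientation, a minimal sketch of how the new symbols can be exercised
through the yt-3.0 units system (the 300 K value is arbitrary and the
conversion shown is illustrative, not part of this commit):

    from yt.units.yt_array import YTQuantity
    from yt.utilities.physical_constants import kb   # alias added above

    # temperatures can now be converted to degrees Rankine via the "R" symbol
    T = YTQuantity(300.0, "K")
    print T.in_units("R")

    # Boltzmann's constant is exposed both as the constant kb and the unit "kB"
    print (kb * T).in_units("keV")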


https://bitbucket.org/yt_analysis/yt/commits/3c9af020cd9f/
Changeset:   3c9af020cd9f
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-06 21:00:48
Summary:     Merging with mainline tip to clear a conflict.
Affected #:  31 files

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -567,8 +567,10 @@
 
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
-echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-[ ! -e xray_emissivity.h5 ] && get_ytdata xray_emissivity.h5
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  cloudy_emissivity.h5' > cloudy_emissivity.h5.sha512
+[ ! -e cloudy_emissivity.h5 ] && get_ytdata cloudy_emissivity.h5
+echo '0f714ae2eace0141b1381abf1160dc8f8a521335e886f99919caf3beb31df1fe271d67c7b2a804b1467949eb16b0ef87a3d53abad0e8160fccac1e90d8d9e85f  apec_emissivity.h5' > apec_emissivity.h5.sha512
+[ ! -e apec_emissivity.h5 ] && get_ytdata apec_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
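
The install script pins each data file to a published sha512 sum. The same
check can be done from Python; a hedged sketch (the path and digest below are
placeholders):

    import hashlib

    def verify_sha512(path, expected_hexdigest):
        # stream in 1 MB chunks so large data files do not fill memory
        h = hashlib.sha512()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_hexdigest

    # verify_sha512("cloudy_emissivity.h5", "de6d8c6ea849...")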

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,6 +1,11 @@
 Constructing Mock X-ray Observations
 ------------------------------------
 
+.. note::
+
+  If you just want to create derived fields for X-ray emission,
+  you should go `here <xray_emission_fields.html>`_ instead.
+
 The ``photon_simulator`` analysis module enables the creation of
 simulated X-ray photon lists of events from datasets that ``yt`` is able
 to read. The simulated events then can be exported to X-ray telescope

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -2,41 +2,46 @@
 
 X-ray Emission Fields
 =====================
-.. sectionauthor:: Britton Smith <brittonsmith@gmail.com>
+.. sectionauthor:: Britton Smith <brittonsmith@gmail.com>, John ZuHone <jzuhone@gmail.com>
+
+.. note::
+
+  If you came here trying to figure out how to create simulated X-ray photons and observations,
+  you should go `here <photon_simulator.html>`_ instead.
 
 This functionality provides the ability to create metallicity-dependent 
-X-ray luminosity, emissivity, and photo emissivity fields for a given 
+X-ray luminosity, emissivity, and photon emissivity fields for a given
 photon energy range.  This works by interpolating from emission tables 
-created with the photoionization code, `Cloudy <http://nublado.org/>`_.  
-If you installed yt with the install script, the data should be located in 
-the *data* directory inside the installation directory.  Emission fields can 
-be made for any interval between 0.1 keV and 100 keV.
+created from the photoionization code `Cloudy <http://nublado.org/>`_ or
+the collisional ionization database `AtomDB <http://www.atomdb.org>`_. If
+you installed yt with the install script, these data files should be located in
+the *data* directory inside the installation directory, or can be downloaded
+from `<http://yt-project.org/data>`_. Emission fields can be made for any
+interval between 0.1 keV and 100 keV.
 
 Adding Emission Fields
 ----------------------
 
-Fields can be created for luminosity (erg/s), emissivity (erg/s/cm^3), 
-and photon emissivity (photons/s/cm^3).  The only required arguments are 
-the minimum and maximum energies.
+Fields will be created for luminosity :math:`{\rm (erg~s^{-1})}`, emissivity :math:`{\rm (erg~s^{-1}~cm^{-3})}`,
+and photon emissivity :math:`{\rm (photons~s^{-1}~cm^{-3})}`.  The only required arguments are the
+dataset object, and the minimum and maximum energies of the energy band.
 
 .. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_luminosity_field, \
-       add_xray_emissivity_field, \
-       add_xray_photon_emissivity_field
+       add_xray_emissivity_field
 
-  add_xray_luminosity_field(0.5, 7)
-  add_xray_emissivity_field(0.5, 7)
-  add_xray_photon_emissivity_field(0.5, 7)
+  xray_fields = add_xray_emissivity_field(0.5, 7.0)
 
 Additional keyword arguments are:
 
- * **filename**  (*string*): Path to data file containing emissivity 
-   values.  If None, a file called xray_emissivity.h5 is used.  This file 
-   contains emissivity tables for primordial elements and for metals at 
-   solar metallicity for the energy range 0.1 to 100 keV.  Default: None.
+ * **filename** (*string*): Path to data file containing emissivity values. If None,
+   a file called "cloudy_emissivity.h5" is used, for photoionized plasmas. A second
+   option, for collisionally ionized plasmas, is in the file "apec_emissivity.h5",
+   available at http://yt-project.org/data. These files contain emissivity tables
+   for primordial elements and for metals at solar metallicity for the energy range
+   0.1 to 100 keV. Default: None.
 
  * **with_metals** (*bool*): If True, use the metallicity field to add the 
    contribution from metals.  If False, only the emission from H/He is 
@@ -46,24 +51,27 @@
    metallicity for the emission from metals.  The *with_metals* keyword 
    must be set to False to use this.  Default: None.
 
-The resulting fields can be used like all normal fields.
+The resulting fields can be used like all normal fields. The function will return the names of
+the created fields in a Python list.
 
-.. python-script::
+.. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_luminosity_field, \
-       add_xray_emissivity_field, \
-       add_xray_photon_emissivity_field
+       add_xray_emissivity_field
 
-  add_xray_luminosity_field(0.5, 7)
-  add_xray_emissivity_field(0.5, 7)
-  add_xray_photon_emissivity_field(0.5, 7)
+  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
 
-  pf = load("enzo_tiny_cosmology/DD0046/DD0046")
-  plot = SlicePlot(pf, 'x', 'Xray_Luminosity_0.5_7keV')
+  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+  plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')
   plot.save()
-  plot = ProjectionPlot(pf, 'x', 'Xray_Emissivity_0.5_7keV')
+  plot = yt.ProjectionPlot(ds, 'x', 'xray_emissivity_0.5_7.0_keV')
   plot.save()
-  plot = ProjectionPlot(pf, 'x', 'Xray_Photon_Emissivity_0.5_7keV')
+  plot = yt.ProjectionPlot(ds, 'x', 'xray_photon_emissivity_0.5_7.0_keV')
   plot.save()
+
+.. warning::
+
+  The X-ray fields depend on the number density of hydrogen atoms, in the yt field
+  ``H_number_density``. If this field is not defined (either in the dataset or by the user),
+  the primordial hydrogen mass fraction (X = 0.76) will be used to construct it.
\ No newline at end of file
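
Per the warning above, the fields fall back to a primordial hydrogen mass
fraction when no "H_number_density" field exists. A user can also define one
explicitly; a sketch mirroring the fallback added later in this changeset:

    import yt
    from yt.utilities.physical_constants import mp
    from yt.utilities.physical_ratios import primordial_H_mass_fraction

    def _nh(field, data):
        # assume a primordial composition (X = 0.76), as the fallback does
        return primordial_H_mass_fraction * data["gas", "density"] / mp

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    ds.add_field(("gas", "H_number_density"), function=_nh, units="cm**-3")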

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ /dev/null
@@ -1,20 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-# Load the dataset.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-
-# Create a 15 kpc radius sphere, centered on the center of the sim volume
-sp = ds.sphere("center", (15.0, "kpc"))
-
-# Get the angular momentum vector for the sphere.
-L = sp.quantities.angular_momentum_vector()
-
-print "Angular momentum vector: {0}".format(L)
-
-# Create an OffAxisSlicePlot of density centered on the object with the L 
-# vector as its normal and a width of 25 kpc on a side
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
-p.save()

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import numpy as np
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import matplotlib.pyplot as plt
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import matplotlib.pyplot as plt
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -3,9 +3,7 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
 sp = ds.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
@@ -13,6 +11,7 @@
 
 print "Angular momentum vector: {0}".format(L)
 
-# Create an OffAxisSlicePlot on the object with the L vector as its normal
+# Create an OffAxisProjectionPlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
 p = yt.OffAxisProjectionPlot(ds, L, "density", sp.center, (25, "kpc"))
 p.save()

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/simple_off_axis_slice.py
--- /dev/null
+++ b/doc/source/cookbook/simple_off_axis_slice.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
+sp = ds.sphere("center", (15.0, "kpc"))
+
+# Get the angular momentum vector for the sphere.
+L = sp.quantities.angular_momentum_vector()
+
+print "Angular momentum vector: {0}".format(L)
+
+# Create an OffAxisSlicePlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p.save()

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -49,8 +49,7 @@
 Simple Radial Profiles
 ~~~~~~~~~~~~~~~~~~~~~~
 
-This shows how to make a profile of a quantity with respect to the radius, in
-this case the radius in Mpc.
+This shows how to make a profile of a quantity with respect to the radius.
 
 .. yt_cookbook:: simple_radial_profile.py
 
@@ -87,17 +86,17 @@
 Off-Axis Slicing
 ~~~~~~~~~~~~~~~~
 
-A cutting plane allows you to slice at some angle that isn't aligned with the
-axes.
+One can create slices from any arbitrary angle, not just those aligned with
+the x,y,z axes.
 
-.. yt_cookbook:: aligned_cutting_plane.py
+.. yt_cookbook:: simple_off_axis_slice.py
 
 .. _cookbook-simple-off-axis-projection:
 
 Off-Axis Projection
 ~~~~~~~~~~~~~~~~~~~
 
-Like cutting planes, off-axis projections can be created from any arbitrary 
+Like off-axis slices, off-axis projections can be created from any arbitrary 
 viewing angle.
 
 .. yt_cookbook:: simple_off_axis_projection.py

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 
 # Load the dataset

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/cookbook/time_series_profiles.py
--- a/doc/source/cookbook/time_series_profiles.py
+++ b/doc/source/cookbook/time_series_profiles.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 
 # Create a time-series object.

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -550,8 +550,6 @@
    ~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum
    ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.EmissivityIntegrator
    ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_emissivity_field
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_luminosity_field
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_photon_emissivity_field
 
 Absorption spectra fitting:
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -84,9 +84,7 @@
     RadialColumnDensity
 
 from .spectral_integrator.api import \
-     add_xray_emissivity_field, \
-     add_xray_luminosity_field, \
-     add_xray_photon_emissivity_field
+     add_xray_emissivity_field
 
 from .star_analysis.api import \
     StarFormationRate, \

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -956,7 +956,7 @@
 
         if isinstance(self.parameters["Area"], basestring):
              mylog.error("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
-             raise TypeError
+             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
         
         if emin is None:
             emin = self.events["eobs"].min().value*0.95
@@ -1032,7 +1032,10 @@
         f = h5py.File(h5file, "w")
 
         f.create_dataset("/exp_time", data=float(self.parameters["ExposureTime"]))
-        f.create_dataset("/area", data=float(self.parameters["Area"]))
+        area = self.parameters["Area"]
+        if not isinstance(area, basestring):
+            area = float(area)
+        f.create_dataset("/area", data=area)
         f.create_dataset("/redshift", data=self.parameters["Redshift"])
         f.create_dataset("/d_a", data=float(self.parameters["AngularDiameterDistance"]))
         if "ARF" in self.parameters:

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -181,7 +181,7 @@
     Examples
     --------
     >>> apec_model = TableApecModel("/Users/jzuhone/Data/atomdb_v2.0.2/", 0.05, 50.0,
-                                    1000, thermal_broad=True)
+    ...                             1000, thermal_broad=True)
     """
     def __init__(self, apec_root, emin, emax, nchan,
                  apec_vers="2.0.2", thermal_broad=False):
@@ -226,7 +226,7 @@
         
         tmpspec = np.zeros((self.nchan))
         
-        i = np.where((self.line_handle[tindex].data.field('element')==element) &
+        i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
@@ -242,24 +242,24 @@
                 vec += np.diff(cdf)*a
         else:
             ie = np.searchsorted(ebins, E0, side='right')-1
-            for i,a in zip(ie,amp): vec[i] += a
+            for i, a in zip(ie, amp): vec[i] += a
         tmpspec += vec
 
-        ind = np.where((self.coco_handle[tindex].data.field('Z')==element) &
-                       (self.coco_handle[tindex].data.field('rmJ')==0))[0]
-        if len(ind)==0:
+        ind = np.where((self.coco_handle[tindex].data.field('Z') == element) &
+                       (self.coco_handle[tindex].data.field('rmJ') == 0))[0]
+        if len(ind) == 0:
             return tmpspec
         else:
-            ind=ind[0]
+            ind = ind[0]
                                                     
-        n_cont=self.coco_handle[tindex].data.field('N_Cont')[ind]
-        e_cont=self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
+        n_cont = self.coco_handle[tindex].data.field('N_Cont')[ind]
+        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
         tmpspec += np.interp(self.emid.ndarray_view(), e_cont, continuum)*self.de.ndarray_view()
         
-        n_pseudo=self.coco_handle[tindex].data.field('N_Pseudo')[ind]
-        e_pseudo=self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
+        n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
+        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
         
         tmpspec += np.interp(self.emid.ndarray_view(), e_pseudo, pseudo)*self.de.ndarray_view()

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -15,6 +15,4 @@
 
 from .spectral_frequency_integrator import \
     EmissivityIntegrator, \
-    add_xray_emissivity_field, \
-    add_xray_luminosity_field, \
-    add_xray_photon_emissivity_field
+    add_xray_emissivity_field
\ No newline at end of file

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -1,6 +1,6 @@
 """
 Integrator classes to deal with interpolation and integration of input spectral
-bins.  Currently only supports Cloudy-style data.
+bins.  Currently only supports Cloudy and APEC-style data.
 
 
 
@@ -23,19 +23,21 @@
      mylog, \
      only_on_root
 
-from yt.fields.local_fields import add_field
+from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
-    BilinearFieldInterpolator
+    UnilinearFieldInterpolator, BilinearFieldInterpolator
 from yt.utilities.physical_constants import \
-    erg_per_eV, hcgs
-from yt.units import keV, Hz
-keV_per_Hz = keV/Hz/hcgs
+    hcgs, mp
+from yt.units.yt_array import YTArray, YTQuantity
+from yt.utilities.physical_ratios import \
+    primordial_H_mass_fraction, erg_per_keV
 
 xray_data_version = 1
 
-def _get_data_file():
-    data_file = "xray_emissivity.h5"
+def _get_data_file(data_file=None):
+    if data_file is None:
+        data_file = "cloudy_emissivity.h5"
     data_url = "http://yt-project.org/data"
     if "YT_DEST" in os.environ and \
       os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
@@ -62,8 +64,9 @@
 
 class ObsoleteDataException(YTException):
     def __str__(self):
-        return "X-ray emissivity data is out of data.\nDownload the latest data from http://yt-project.org/data/xray_emissivity.h5 and move it to %s." % \
-          os.path.join(os.environ["YT_DEST"], "data", "xray_emissivity.h5")
+        return "X-ray emissivity data is out of date.\n" + \
+               "Download the latest data from http://yt-project.org/data/cloudy_emissivity.h5 and move it to %s." % \
+          os.path.join(os.environ["YT_DEST"], "data", "cloudy_emissivity.h5")
           
 class EmissivityIntegrator(object):
     r"""Class for making X-ray emissivity fields with hdf5 data tables 
@@ -75,9 +78,11 @@
     ----------
     filename: string, default None
         Path to data file containing emissivity values.  If None,
-        a file called xray_emissivity.h5 is used.  This file contains 
-        emissivity tables for primordial elements and for metals at 
-        solar metallicity for the energy range 0.1 to 100 keV.
+        a file called "cloudy_emissivity.h5" is used, for photoionized
+        plasmas. A second option, for collisionally ionized plasmas, is
+        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
+        These files contain emissivity tables for primordial elements and
+        for metals at solar metallicity for the energy range 0.1 to 100 keV.
         Default: None.
         
     """
@@ -89,7 +94,8 @@
             default_filename = True
 
         if not os.path.exists(filename):
-            raise IOError("File does not exist: %s." % filename)
+            mylog.warning("File %s does not exist, will attempt to find it." % filename)
+            filename = _get_data_file(data_file=filename)
         only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
         in_file = h5py.File(filename, "r")
         if "info" in in_file.attrs:
@@ -103,51 +109,51 @@
 
         for field in ["emissivity_primordial", "emissivity_metals",
                       "log_nH", "log_T", "log_E"]:
-            setattr(self, field, in_file[field][:])
+            if field in in_file:
+                setattr(self, field, in_file[field][:])
         in_file.close()
 
         E_diff = np.diff(self.log_E)
         self.E_bins = \
-                  np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
-                                               [self.log_E[-1] - 0.5 * E_diff[-1],
-                                                self.log_E[-1] + 0.5 * E_diff[-1]]]))
-        self.dnu = keV_per_Hz * np.diff(self.E_bins)
+                  YTArray(np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
+                                                      [self.log_E[-1] - 0.5 * E_diff[-1],
+                                                       self.log_E[-1] + 0.5 * E_diff[-1]]])),
+                          "keV")
+        self.dnu = (np.diff(self.E_bins)/hcgs).in_units("Hz")
 
-    def _get_interpolator(self, data, e_min, e_max):
-        r"""Create an interpolator for total emissivity in a 
-        given energy range.
-
-        Parameters
-        ----------
-        e_min: float
-            the minimum energy in keV for the energy band.
-        e_min: float
-            the maximum energy in keV for the energy band.
-
-        """
+    def get_interpolator(self, data, e_min, e_max):
+        e_min = YTQuantity(e_min, "keV")
+        e_max = YTQuantity(e_max, "keV")
         if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
           (e_max - self.E_bins[-1]) / e_max > 1e-3:
-            raise EnergyBoundsException(np.power(10, self.E_bins[0]),
-                                        np.power(10, self.E_bins[-1]))
+            raise EnergyBoundsException(self.E_bins[0], self.E_bins[-1])
         e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
         e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
         e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
 
-        my_dnu = np.copy(self.dnu[e_is: e_ie])
+        my_dnu = self.dnu[e_is: e_ie].copy()
         # clip edge bins if the requested range is smaller
-        my_dnu[0] -= e_min - self.E_bins[e_is]
-        my_dnu[-1] -= self.E_bins[e_ie] - e_max
+        my_dnu[0] -= ((e_min - self.E_bins[e_is])/hcgs).in_units("Hz")
+        my_dnu[-1] -= ((self.E_bins[e_ie] - e_max)/hcgs).in_units("Hz")
 
         interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
-        return BilinearFieldInterpolator(np.log10(interp_data),
-                                         [self.log_nH[0], self.log_nH[-1],
-                                          self.log_T[0],  self.log_T[-1]],
-                                         ["log_nH", "log_T"], truncate=True)
+        if len(data.shape) == 2:
+            emiss = UnilinearFieldInterpolator(np.log10(interp_data),
+                                               [self.log_T[0],  self.log_T[-1]],
+                                               "log_T", truncate=True)
+        else:
+            emiss = BilinearFieldInterpolator(np.log10(interp_data),
+                                              [self.log_nH[0], self.log_nH[-1],
+                                               self.log_T[0],  self.log_T[-1]],
+                                              ["log_nH", "log_T"], truncate=True)
 
-def add_xray_emissivity_field(e_min, e_max, filename=None,
+        return emiss
+
+def add_xray_emissivity_field(ds, e_min, e_max,
+                              filename=None,
                               with_metals=True,
                               constant_metallicity=None):
-    r"""Create an X-ray emissivity field for a given energy range.
+    r"""Create X-ray emissivity fields for a given energy range.
 
     Parameters
     ----------
@@ -155,197 +161,117 @@
         the minimum energy in keV for the energy band.
     e_min: float
         the maximum energy in keV for the energy band.
-
-    Other Parameters
-    ----------------
-    filename: string
+    filename: string, optional
         Path to data file containing emissivity values.  If None,
-        a file called xray_emissivity.h5 is used.  This file contains 
-        emissivity tables for primordial elements and for metals at 
-        solar metallicity for the energy range 0.1 to 100 keV.
+        a file called "cloudy_emissivity.h5" is used, for photoionized
+        plasmas. A second option, for collisionally ionized plasmas, is
+        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
+        These files contain emissivity tables for primordial elements and
+        for metals at solar metallicity for the energy range 0.1 to 100 keV.
         Default: None.
-    with_metals: bool
+    with_metals: bool, optional
         If True, use the metallicity field to add the contribution from 
         metals.  If False, only the emission from H/He is considered.
         Default: True.
-    constant_metallicity: float
+    constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
         to use this.
         Default: None.
 
-    This will create a field named "Xray_Emissivity_{e_min}_{e_max}keV".
-    The units of the field are erg s^-1 cm^-3.
+    This will create three fields:
+
+    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
+    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
+    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
 
     Examples
     --------
 
     >>> from yt.mods import *
     >>> from yt.analysis_modules.spectral_integrator.api import *
-    >>> add_xray_emissivity_field(0.5, 2)
-    >>> pf = load(dataset)
-    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> ds = load(dataset)
+    >>> add_xray_emissivity_field(ds, 0.5, 2)
+    >>> p = ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
     >>> p.save()
 
     """
 
+    if with_metals:
+        try:
+            ds._get_field_info("metal_density")
+        except YTFieldNotFound:
+            raise RuntimeError("Your dataset does not have a \"metal_density\" field! " +
+                               "Perhaps you should specify a constant metallicity?")
+
     my_si = EmissivityIntegrator(filename=filename)
 
-    em_0 = my_si._get_interpolator(my_si.emissivity_primordial, e_min, e_max)
+    em_0 = my_si.get_interpolator(my_si.emissivity_primordial, e_min, e_max)
     em_Z = None
     if with_metals or constant_metallicity is not None:
-        em_Z = my_si._get_interpolator(my_si.emissivity_metals, e_min, e_max)
+        em_Z = my_si.get_interpolator(my_si.emissivity_metals, e_min, e_max)
+
+    energy_erg = np.power(10, my_si.log_E) * erg_per_keV
+    emp_0 = my_si.get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
+                                   e_min, e_max)
+    emp_Z = None
+    if with_metals or constant_metallicity is not None:
+        emp_Z = my_si.get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
+                                       e_min, e_max)
+
+    try:
+        ds._get_field_info("H_number_density")
+    except YTFieldNotFound:
+        mylog.warning("Could not find a field for \"H_number_density\". Assuming primordial H " +
+                      "mass fraction.")
+        def _nh(field, data):
+            return primordial_H_mass_fraction*data["gas","density"]/mp
+        ds.add_field(("gas","H_number_density"), function=_nh, units="cm**-3")
 
     def _emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
-              "log_T"   : np.log10(data["Temperature"])}
+        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
+              "log_T"   : np.log10(data["gas","temperature"])}
 
         my_emissivity = np.power(10, em_0(dd))
         if em_Z is not None:
             if with_metals:
-                my_Z = data["Metallicity"]
+                my_Z = data["gas","metallicity"]
             elif constant_metallicity is not None:
                 my_Z = constant_metallicity
             my_emissivity += my_Z * np.power(10, em_Z(dd))
 
-        return data["H_NumberDensity"]**2 * my_emissivity
+        return data["gas","H_number_density"]**2 * YTArray(my_emissivity, "erg*cm**3/s")
 
-    field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
-    add_field(field_name, function=_emissivity_field,
-              projection_conversion="cm",
-              display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
-              units=r"\rm{erg}\/\rm{cm}^{-3}\/\rm{s}^{-1}")
-    return field_name
-
-def add_xray_luminosity_field(e_min, e_max, filename=None,
-                              with_metals=True,
-                              constant_metallicity=None):
-    r"""Create an X-ray luminosity field for a given energy range.
-
-    Parameters
-    ----------
-    e_min: float
-        the minimum energy in keV for the energy band.
-    e_min: float
-        the maximum energy in keV for the energy band.
-
-    Other Parameters
-    ----------------
-    filename: string
-        Path to data file containing emissivity values.  If None,
-        a file called xray_emissivity.h5 is used.  This file contains 
-        emissivity tables for primordial elements and for metals at 
-        solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-    with_metals: bool
-        If True, use the metallicity field to add the contribution from 
-        metals.  If False, only the emission from H/He is considered.
-        Default: True.
-    constant_metallicity: float
-        If specified, assume a constant metallicity for the emission 
-        from metals.  The *with_metals* keyword must be set to False 
-        to use this.
-        Default: None.
-
-    This will create a field named "Xray_Luminosity_{e_min}_{e_max}keV".
-    The units of the field are erg s^-1.
-
-    Examples
-    --------
-
-    >>> from yt.mods import *
-    >>> from yt.analysis_modules.spectral_integrator.api import *
-    >>> add_xray_luminosity_field(0.5, 2)
-    >>> pf = load(dataset)
-    >>> sp = pf.sphere('max', (2., 'mpc'))
-    >>> print sp.quantities['TotalQuantity']('Xray_Luminosity_0.5_2keV')
-    
-    """
-
-    em_field = add_xray_emissivity_field(e_min, e_max, filename=filename,
-                                         with_metals=with_metals,
-                                         constant_metallicity=constant_metallicity)
+    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(emiss_name, function=_emissivity_field,
+                 display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+                 units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
-        return data[em_field] * data["CellVolume"]
-    field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
-    add_field(field_name, function=_luminosity_field,
-              display_name=r"\rm{L}_{X}\/(%s-%s\/keV)" % (e_min, e_max),
-              units=r"\rm{erg}\/\rm{s}^{-1}")
-    return field_name
+        return data[emiss_name] * data["cell_volume"]
 
-def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
-                                     with_metals=True,
-                                     constant_metallicity=None):
-    r"""Create an X-ray photon emissivity field for a given energy range.
+    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(lum_name, function=_luminosity_field,
+                 display_name=r"\rm{L}_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+                 units="erg/s")
 
-    Parameters
-    ----------
-    e_min: float
-        the minimum energy in keV for the energy band.
-    e_min: float
-        the maximum energy in keV for the energy band.
+    def _photon_emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
+              "log_T"   : np.log10(data["gas","temperature"])}
 
-    Other Parameters
-    ----------------
-    filename: string
-        Path to data file containing emissivity values.  If None,
-        a file called xray_emissivity.h5 is used.  This file contains 
-        emissivity tables for primordial elements and for metals at 
-        solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-    with_metals: bool
-        If True, use the metallicity field to add the contribution from 
-        metals.  If False, only the emission from H/He is considered.
-        Default: True.
-    constant_metallicity: float
-        If specified, assume a constant metallicity for the emission 
-        from metals.  The *with_metals* keyword must be set to False 
-        to use this.
-        Default: None.
-
-    This will create a field named "Xray_Photon_Emissivity_{e_min}_{e_max}keV".
-    The units of the field are photons s^-1 cm^-3.
-
-    Examples
-    --------
-
-    >>> from yt.mods import *
-    >>> from yt.analysis_modules.spectral_integrator.api import *
-    >>> add_xray_emissivity_field(0.5, 2)
-    >>> pf = load(dataset)
-    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
-    >>> p.save()
-
-    """
-
-    my_si = EmissivityIntegrator(filename=filename)
-    energy_erg = np.power(10, my_si.log_E) * erg_per_eV
-
-    em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
-                                   e_min, e_max)
-    em_Z = None
-    if with_metals or constant_metallicity is not None:
-        em_Z = my_si._get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
-                                       e_min, e_max)
-
-    def _emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
-              "log_T"   : np.log10(data["Temperature"])}
-
-        my_emissivity = np.power(10, em_0(dd))
-        if em_Z is not None:
+        my_emissivity = np.power(10, emp_0(dd))
+        if emp_Z is not None:
             if with_metals:
-                my_Z = data["Metallicity"]
+                my_Z = data["gas","metallicity"]
             elif constant_metallicity is not None:
                 my_Z = constant_metallicity
-            my_emissivity += my_Z * np.power(10, em_Z(dd))
+            my_emissivity += my_Z * np.power(10, emp_Z(dd))
 
-        return data["H_NumberDensity"]**2 * my_emissivity
+        return data["gas","H_number_density"]**2 * YTArray(my_emissivity, "photons*cm**3/s")
 
-    field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
-    add_field(field_name, function=_emissivity_field,
-              projection_conversion="cm",
-              display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
-              units=r"\rm{photons}\/\rm{cm}^{-3}\/\rm{s}^{-1}")
-    return field_name
+    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(phot_name, function=_photon_emissivity_field,
+                 display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+                 units="photons/cm**3/s")
+
+    return emiss_name, lum_name, phot_name
\ No newline at end of file
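
Putting the new interface together: the function now takes the dataset as its
first argument and returns the names of the three fields it creates. A usage
sketch based on the docstring above:

    import yt
    from yt.analysis_modules.spectral_integrator.api import \
         add_xray_emissivity_field

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    # returns ("xray_emissivity_0.5_7.0_keV", "xray_luminosity_0.5_7.0_keV",
    #          "xray_photon_emissivity_0.5_7.0_keV")
    emiss_name, lum_name, phot_name = add_xray_emissivity_field(ds, 0.5, 7.0)

    p = yt.ProjectionPlot(ds, 'x', emiss_name)
    p.save()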

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -626,7 +626,7 @@
         self.ActiveDimensions = np.array(dims, dtype='int32')
         if self.ActiveDimensions.size == 1:
             self.ActiveDimensions = np.array([dims, dims, dims], dtype="int32")
-        self.dds = (self.right_edge - self.left_edge)/self.ActiveDimensions
+        self.dds = self.base_dds = (self.right_edge - self.left_edge)/self.ActiveDimensions
         self.level = 99
         self._setup_data_source()
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -199,7 +199,7 @@
         op.process_octree(self.oct_handler, mdom_ind, positions, 
             self.fcoords, fields,
             self.domain_id, self._domain_offset, self.pf.periodicity,
-            index_fields, particle_octree, pdom_ind)
+            index_fields, particle_octree, pdom_ind, self.pf.geometry)
         vals = op.finalize()
         if vals is None: return
         if isinstance(vals, list):

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -970,7 +970,7 @@
 def load_particles(data, length_unit = None, bbox=None,
                    sim_time=0.0, mass_unit = None, time_unit = None,
                    velocity_unit=None, periodicity=(True, True, True),
-                   n_ref = 64, over_refine_factor = 1):
+                   n_ref = 64, over_refine_factor = 1, geometry = "cartesian"):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -1083,7 +1083,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    spf = StreamParticlesDataset(handler)
+    spf = StreamParticlesDataset(handler, geometry = geometry)
     spf.n_ref = n_ref
     spf.over_refine_factor = over_refine_factor
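
load_particles now threads a geometry keyword through to the stream dataset;
a hedged sketch with fabricated particle data (whether every downstream
operation already supports spherical particle data is not established by this
diff):

    import numpy as np
    from yt.frontends.stream.api import load_particles

    n = 10000
    # with geometry="spherical", positions are interpreted as (r, theta, phi)
    data = {"particle_position_x": np.random.uniform(0.0, 1.0, n),
            "particle_position_y": np.random.uniform(0.0, np.pi, n),
            "particle_position_z": np.random.uniform(0.0, 2.0*np.pi, n),
            "particle_mass": np.ones(n)}
    bbox = np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2.0*np.pi]])
    ds = load_particles(data, length_unit=3.086e21, mass_unit=1.989e33,
                        bbox=bbox, n_ref=64, geometry="spherical")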
 

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -95,7 +95,7 @@
             if self.update_values == 1:
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
-        
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def process_grid(self, gobj,
@@ -424,3 +424,56 @@
         return
 
 deposit_mesh_id = MeshIdentifier
+
+cdef class NNParticleField(ParticleDepositOperation):
+    cdef np.float64_t *nnfield
+    cdef np.float64_t *distfield
+    cdef public object onnfield
+    cdef public object odistfield
+    def initialize(self):
+        self.onnfield = np.zeros(self.nvals, dtype="float64", order='F')
+        cdef np.ndarray arr = self.onnfield
+        self.nnfield = <np.float64_t*> arr.data
+
+        self.odistfield = np.zeros(self.nvals, dtype="float64", order='F')
+        self.odistfield[:] = np.inf
+        arr = self.odistfield
+        self.distfield = <np.float64_t*> arr.data
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields,
+                      np.int64_t domain_ind
+                      ):
+        # This one is a bit slow.  Every grid cell is going to be iterated
+        # over, and we're going to deposit particles in it.
+        cdef int ii[3], i, j, k
+        cdef np.int64_t ggind
+        cdef np.float64_t r2, gpos[3]
+        gpos[0] = left_edge[0] + 0.5 * dds[0]
+        for i in range(dim[0]):
+            gpos[1] = left_edge[1] + 0.5 * dds[1]
+            for j in range(dim[1]):
+                gpos[2] = left_edge[2] + 0.5 * dds[2]
+                for k in range(dim[2]):
+                    ggind = gind(i, j, k, dim) + offset
+                    r2 = ((ppos[0] - gpos[0])*(ppos[0] - gpos[0]) +
+                          (ppos[1] - gpos[1])*(ppos[1] - gpos[1]) +
+                          (ppos[2] - gpos[2])*(ppos[2] - gpos[2]))
+                    if r2 < self.distfield[ggind]:
+                        self.distfield[ggind] = r2
+                        self.nnfield[ggind] = fields[0]
+                    gpos[2] += dds[2]
+                gpos[1] += dds[1]
+            gpos[0] += dds[0]
+        return
+        
+    def finalize(self):
+        return self.onnfield
+
+deposit_nearest = NNParticleField
+
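
NNParticleField assigns to each cell the field value of the particle nearest
that cell's center, tracking the running minimum squared distance. A
brute-force NumPy rendering of the same idea, purely for illustration (the
Cython version above works per-oct with neighbor lists):

    import numpy as np

    def deposit_nearest(positions, values, left_edge, dds, dims):
        # cell-center coordinates of a dims[0] x dims[1] x dims[2] block
        axes = [left_edge[d] + (np.arange(dims[d]) + 0.5) * dds[d]
                for d in range(3)]
        centers = np.stack(np.meshgrid(*axes, indexing="ij"),
                           axis=-1).reshape(-1, 3)
        nnfield = np.zeros(centers.shape[0])
        distfield = np.full(centers.shape[0], np.inf)
        for pos, val in zip(positions, values):
            r2 = ((centers - pos) ** 2).sum(axis=1)
            closer = r2 < distfield
            distfield[closer] = r2[closer]
            nnfield[closer] = val
        return nnfield.reshape(dims)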

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -36,27 +36,6 @@
     np.int64_t pn       # Particle number
     np.float64_t r2     # radius**2
 
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-cdef inline np.float64_t r2dist(np.float64_t ppos[3],
-                                np.float64_t cpos[3],
-                                np.float64_t DW[3],
-                                bint periodicity[3]):
-    cdef int i
-    cdef np.float64_t r2, DR
-    r2 = 0.0
-    for i in range(3):
-        DR = (ppos[i] - cpos[i])
-        if not periodicity[i]:
-            pass
-        elif (DR > DW[i]/2.0):
-            DR -= DW[i]
-        elif (DR < -DW[i]/2.0):
-            DR += DW[i]
-        r2 += DR * DR
-    return r2
-
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
@@ -71,6 +50,7 @@
     cdef np.float64_t *ppos
     # Note that we are preallocating here, so this is *not* threadsafe.
     cdef NeighborList *neighbors
+    cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t *ppos,
                                np.float64_t **fields, np.int64_t nneighbors,

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -18,7 +18,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free, realloc
 cimport cython
-from libc.math cimport sqrt, fabs
+from libc.math cimport sqrt, fabs, sin, cos
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, \
@@ -37,6 +37,40 @@
     else:
         return 1
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if max_dist2 >= 0.0 and r2 > max_dist2:
+            return -1.0
+    return r2
+
+cdef void spherical_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]):
+    opos[0] = ipos[0] * sin(ipos[1]) * cos(ipos[2])
+    opos[1] = ipos[0] * sin(ipos[1]) * sin(ipos[2])
+    opos[2] = ipos[0] * cos(ipos[1])
+
+cdef void cart_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]):
+    opos[0] = ipos[0]
+    opos[1] = ipos[1]
+    opos[2] = ipos[2]
+
 cdef class ParticleSmoothOperation:
     def __init__(self, nvals, nfields, max_neighbors):
         # This is the set of cells, in grids, blocks or octs, we are handling.
@@ -66,7 +100,8 @@
                      periodicity = (True, True, True),
                      index_fields = None,
                      OctreeContainer particle_octree = None,
-                     np.ndarray[np.int64_t, ndim=1] pdom_ind = None):
+                     np.ndarray[np.int64_t, ndim=1] pdom_ind = None,
+                     geometry = "cartesian"):
         # This will be a several-step operation.
         #
         # We first take all of our particles and assign them to Octs.  If they
@@ -108,6 +143,25 @@
         cdef np.ndarray[np.int64_t, ndim=2] doff_m
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=4] iarr
+        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
+        if geometry == "cartesian":
+            self.pos_setup = cart_coord_setup
+            cart_positions = positions
+        elif geometry == "spherical":
+            self.pos_setup = spherical_coord_setup
+            cart_positions = np.empty((positions.shape[0], 3), dtype="float64")
+
+            cart_positions[:,0] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.cos(positions[:,2])
+            cart_positions[:,1] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.sin(positions[:,2])
+            cart_positions[:,2] = positions[:,0] * \
+                                  np.cos(positions[:,1])
+            periodicity = (False, False, False)
+        else:
+            raise NotImplementedError
         dims[0] = dims[1] = dims[2] = (1 << mesh_octree.oref)
         cdef int nz = dims[0] * dims[1] * dims[2]
         numpart = positions.shape[0]
@@ -174,6 +228,7 @@
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
         ppos = <np.float64_t *> positions.data
+        cart_pos = <np.float64_t *> cart_positions.data
         doffs = <np.int64_t*> doff.data
         pinds = <np.int64_t*> pind.data
         pcounts = <np.int64_t*> pcount.data
@@ -213,7 +268,7 @@
             free(neighbors)
             nproc += 1
             self.neighbor_process(dims, moi.left_edge, moi.dds,
-                         ppos, field_pointers, nneighbors, nind, doffs,
+                         cart_pos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset, index_field_pointers)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
@@ -253,7 +308,7 @@
         if self.curn < self.maxn:
             cur = &self.neighbors[self.curn]
             cur.pn = pn
-            cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity)
+            cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity, -1)
             self.curn += 1
             if self.curn == self.maxn:
                 # This time we sort it, so that future insertions will be able
@@ -262,7 +317,10 @@
                       Neighbor_compare)
             return
         # This will go (curn - 1) through 0.
-        r2_c = r2dist(ppos, cpos, self.DW, self.periodicity)
+        r2_o = self.neighbors[self.curn - 1].r2
+        r2_c = r2dist(ppos, cpos, self.DW, self.periodicity, r2_o)
+        # Early terminate
+        if r2_c < 0: return
         pn_c = pn
         for i in range((self.curn - 1), -1, -1):
             # First we evaluate against i.  If our candidate radius is greater
@@ -320,15 +378,16 @@
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
         cdef int i, j, k, ntot, nntot, m
-        cdef np.float64_t cpos[3]
+        cdef np.float64_t cpos[3], opos[3]
         cpos[0] = left_edge[0] + 0.5*dds[0]
         for i in range(dim[0]):
             cpos[1] = left_edge[1] + 0.5*dds[1]
             for j in range(dim[1]):
                 cpos[2] = left_edge[2] + 0.5*dds[2]
                 for k in range(dim[2]):
+                    self.pos_setup(cpos, opos)
                     self.neighbor_find(nneighbors, nind, doffs, pcounts,
-                        pinds, ppos, cpos)
+                        pinds, ppos, opos)
                     # Now we have all our neighbors in our neighbor list.
                     if self.curn <-1*self.maxn:
                         ntot = nntot = 0
@@ -337,7 +396,7 @@
                             nntot += 1
                             ntot += pcounts[nind[m]]
                         print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
-                    self.process(offset, i, j, k, dim, cpos, fields,
+                    self.process(offset, i, j, k, dim, opos, fields,
                                  index_fields)
                     cpos[2] += dds[2]
                 cpos[1] += dds[1]
@@ -407,3 +466,76 @@
         return
 
 volume_weighted_smooth = VolumeWeightedSmooth
+
+cdef class NearestNeighborSmooth(ParticleSmoothOperation):
+    cdef np.float64_t *fp
+    cdef public object vals
+    def initialize(self):
+        cdef np.ndarray tarr
+        assert(self.nfields == 1)
+        tarr = np.zeros(self.nvals, dtype="float64", order="F")
+        self.vals = tarr
+        self.fp = <np.float64_t *> tarr.data
+
+    def finalize(self):
+        return self.vals
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef np.int64_t pn
+        # We get back our mass 
+        # rho_i = sum(j = 1 .. n) m_j * W_ij
+        pn = self.neighbors[0].pn
+        self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
+        #self.fp[gind(i,j,k,dim) + offset] = self.neighbors[0].r2
+        return
+
+nearest_smooth = NearestNeighborSmooth
+
+cdef class IDWInterpolationSmooth(ParticleSmoothOperation):
+    cdef np.float64_t *fp
+    cdef public int p2
+    cdef public object vals
+    def initialize(self):
+        cdef np.ndarray tarr
+        assert(self.nfields == 1)
+        tarr = np.zeros(self.nvals, dtype="float64", order="F")
+        self.vals = tarr
+        self.fp = <np.float64_t *> tarr.data
+        self.p2 = 2 # Power, for IDW, in units of 2.  So we only do even p's.
+
+    def finalize(self):
+        return self.vals
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef np.int64_t pn, ni, di
+        cdef np.float64_t total_weight = 0.0, total_value = 0.0, r2, val, w
+        # We're going to do a very simple IDW average
+        if self.neighbors[0].r2 == 0.0:
+            pn = self.neighbors[0].pn
+            self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
+        for ni in range(self.curn):
+            r2 = self.neighbors[ni].r2
+            val = fields[0][self.neighbors[ni].pn]
+            w = r2
+            for di in range(self.p2 - 1):
+                w *= r2
+            total_value += w * val
+            total_weight += w
+        self.fp[gind(i,j,k,dim) + offset] = total_value / total_weight
+        return
+
+idw_smooth = IDWInterpolationSmooth
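
The r2dist helper, moved from the .pxd into this module, gains a max_dist2
cutoff so a neighbor search can stop accumulating as soon as a candidate is
provably farther than the current worst neighbor. In plain Python the logic
reads:

    def r2dist(ppos, cpos, DW, periodicity, max_dist2=-1.0):
        # squared distance under the minimum-image convention, with early exit
        r2 = 0.0
        for i in range(3):
            DR = ppos[i] - cpos[i]
            if periodicity[i]:
                # wrap the separation into [-DW/2, DW/2] on periodic axes
                if DR > DW[i] / 2.0:
                    DR -= DW[i]
                elif DR < -DW[i] / 2.0:
                    DR += DW[i]
            r2 += DR * DR
            if max_dist2 >= 0.0 and r2 > max_dist2:
                return -1.0  # already farther than the current worst neighbor
        return r2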

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -93,6 +93,7 @@
     "kB": (boltzmann_constant_erg_per_K,
            dimensions.energy/dimensions.temperature),
     "R": (rankine_per_kelvin, dimensions.temperature),
+    "photons": (1.0, dimensions.dimensionless),
 
     # for AstroPy compatibility
     "solMass": (mass_sun_grams, dimensions.mass),

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -24,7 +24,6 @@
     OctreeContainer, OctInfo
 from yt.geometry.oct_visitors cimport \
     Oct
-from yt.geometry.particle_smooth cimport r2dist
 from .amr_kdtools cimport _find_node, Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -16,6 +16,7 @@
 
 import numpy as np
 import h5py
+from matplotlib.patches import Circle
 
 from yt.funcs import *
 from yt.extern.six import add_metaclass
@@ -865,21 +866,49 @@
         plot._axes.text(x, y, self.text, **kwargs)
 
 class HaloCatalogCallback(PlotCallback):
+    """
+    annotate_halos(halo_catalog, circle_kwargs=None,
+        width=None, annotate_field=False,
+        font_kwargs=None, factor=1.0)
+
+    Plots circles at the locations of all the halos in a halo
+    catalog, with radii corresponding to the virial radius of
+    each halo.
+
+    circle_kwargs: A dictionary of keyword arguments controlling
+        the appearance of the circles, passed to the Matplotlib
+        patch Circle.
+    width: The width over which to select halos to plot, useful
+        when overplotting onto a slice plot. Accepts a tuple in
+        the form (1.0, 'Mpc').
+    annotate_field: A field contained in the halo catalog whose
+        value is written as text next to each halo. Example:
+        annotate_field = 'particle_mass' writes the halo mass
+        next to each halo.
+    font_kwargs: A dictionary of keyword arguments controlling
+        the appearance of the annotated text.
+    factor: A number by which the virial radius is multiplied
+        when plotting the circles. Example: factor = 2.0 plots
+        circles with twice each halo's virial radius.
+    """
 
     _type_name = 'halos'
     region = None
     _descriptor = None
 
-    def __init__(self, halo_catalog, col='white', alpha =1, 
-            width = None, annotate_field = False, font_kwargs = None):
+    def __init__(self, halo_catalog, circle_kwargs=None,
+            width=None, annotate_field=False,
+            font_kwargs=None, factor=1.0):
 
         PlotCallback.__init__(self)
         self.halo_catalog = halo_catalog
-        self.color = col
-        self.alpha = alpha
         self.width = width
         self.annotate_field = annotate_field
         self.font_kwargs = font_kwargs
+        self.factor = factor
+        if circle_kwargs is None:
+            circle_kwargs = {'edgecolor':'white', 'facecolor':'None'}
+        self.circle_kwargs = circle_kwargs
 
     def __call__(self, plot):
         data = plot.data
@@ -898,20 +927,20 @@
         plot._axes.hold(True)
 
         # Set up scales for pixel size and original data
-        units = 'Mpccm'
         pixel_scale = self.pixel_scale(plot)[0]
         data_scale = data.pf.length_unit
+        units = data_scale.units
 
         # Convert halo positions to code units of the plotted data
         # and then to units of the plotted window
         px = halo_data[field_x][:].in_units(units) / data_scale
         py = halo_data[field_y][:].in_units(units) / data_scale
         px, py = self.convert_to_plot(plot,[px,py])
+
+        # Convert halo radii to a radius in pixels
+        radius = halo_data['virial_radius'][:].in_units(units)
+        radius = np.array(radius*pixel_scale*self.factor/data_scale)
         
-        # Convert halo radii to a radius in pixels
-        radius = halo_data['radius'][:].in_units(units)
-        radius = radius*pixel_scale/data_scale
-
         if self.width:
             pz = halo_data[field_z][:].in_units(units)/data_scale
             pz = data.pf.arr(pz, 'code_length')
@@ -927,8 +956,10 @@
             py = py[indices]
             radius = radius[indices]
 
-        plot._axes.scatter(px, py, edgecolors='None', marker='o',
-                           s=radius, c=self.color,alpha=self.alpha)
+        for x, y, r in zip(px, py, radius):
+            plot._axes.add_artist(Circle(xy=(x, y),
+                radius=r, **self.circle_kwargs))
+
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
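
A short usage sketch for the updated callback; the dataset and catalog
paths are hypothetical, and the halo catalog module path is an
assumption based on the yt-3.0 layout:

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    ds = yt.load("Enzo_64/DD0043/data0043")
    halos_ds = yt.load("halo_catalogs/catalog/catalog.0.h5")
    hc = HaloCatalog(data_ds=ds, halos_ds=halos_ds)

    p = yt.SlicePlot(ds, 'z', 'density')
    p.annotate_halos(hc, circle_kwargs={'edgecolor': 'red',
                                        'facecolor': 'None'},
                     width=(10, 'Mpc'),
                     annotate_field='particle_mass', factor=2.0)
    p.save()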

diff -r 3bac46ab60743e9e07407b449eb88f1018396975 -r 3c9af020cd9f1b64b90d06ad94f01ee36458fff7 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -883,7 +883,7 @@
                 for k, v in self.plots.iteritems():
                     names.append(v.save(name, mpl_kwargs))
                 return names
-            fn = "%s_%s%s" % (prefix, middle, suffix)
+            fn = "%s_%s%s" % (prefix, middle, '.png')
             names.append(fn)
             self.plots[f].save(fn, mpl_kwargs)
         return names


https://bitbucket.org/yt_analysis/yt/commits/3e26d6ff17a6/
Changeset:   3e26d6ff17a6
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-15 00:41:08
Summary:     Merging with rankine PR.
Affected #:  3 files

diff -r 085376fffe7482785e0f7da657c417444f14bdd3 -r 3e26d6ff17a6a33a550e8742774e783ddc51008b yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -18,7 +18,8 @@
     mass_sun_grams, sec_per_year, sec_per_day, sec_per_hr, \
     sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
-    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams
+    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
+    boltzmann_constant_erg_per_K, rankine_per_kelvin
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -89,6 +90,9 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
+    "kB": (boltzmann_constant_erg_per_K,
+           dimensions.energy/dimensions.temperature),
+    "R": (rankine_per_kelvin, dimensions.temperature),
     "photons": (1.0, dimensions.dimensionless),
 
     # for AstroPy compatibility

diff -r 085376fffe7482785e0f7da657c417444f14bdd3 -r 3e26d6ff17a6a33a550e8742774e783ddc51008b yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -45,6 +45,7 @@
 mh = mp
 clight = speed_of_light_cgs
 kboltz = boltzmann_constant_cgs
+kb = kboltz
 hcgs = planck_constant_cgs
 sigma_thompson = cross_section_thompson_cgs
 Na = 1 / amu_cgs

diff -r 085376fffe7482785e0f7da657c417444f14bdd3 -r 3e26d6ff17a6a33a550e8742774e783ddc51008b yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -75,6 +75,7 @@
 keV_per_K = 1.0 / K_per_keV
 keV_per_erg = 1.0 / erg_per_keV
 eV_per_erg = 1.0 / erg_per_eV
+rankine_per_kelvin = 9./5.
 
 # Solar System masses
 # Standish, E.M. (1995) "Report of the IAU WGAS Sub-Group on Numerical Standards",


https://bitbucket.org/yt_analysis/yt/commits/0fe6c382c015/
Changeset:   0fe6c382c015
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-15 21:29:06
Summary:     Adding support for fahrenheit and celsius.

Also fixes an issue where in-place YTArray operations copied the array
instead of modifying it in place.
Affected #:  7 files

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -31,7 +31,8 @@
 # classes
 from yt.units.unit_object import Unit, UnitParseError
 # objects
-from yt.units.unit_lookup_table import default_unit_symbol_lut, unit_prefixes
+from yt.units.unit_lookup_table import \
+    default_unit_symbol_lut, unit_prefixes, prefixable_units
 # unit definitions
 from yt.utilities.physical_constants import \
     cm_per_pc, sec_per_year, cm_per_km, cm_per_mpc, \
@@ -46,13 +47,17 @@
 
     # go through all possible prefix combos
     for symbol in default_unit_symbol_lut.keys():
-        for prefix in unit_prefixes.keys():
+        if symbol in prefixable_units:
+            keys = unit_prefixes.keys()
+        else:
+            keys = [symbol]
+        for prefix in keys:
             new_symbol = "%s%s" % (prefix, symbol)
 
             # test if we have seen this symbol
             if new_symbol in full_set:
                 print "Duplicate symbol: %s" % new_symbol
-                yield assert_true, False
+                raise RuntimeError
 
             full_set.add(new_symbol)
     yield assert_true, True
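
The change above means SI prefixes are only combined with symbols
registered in prefixable_units; a minimal sketch of that expansion
logic, with hypothetical data:

    unit_prefixes = {'k': 1e3, 'c': 1e-2, 'm': 1e-3}
    prefixable_units = ['m', 'g', 's']

    def expand_symbols(symbol):
        # Prefixable symbols get every prefix; others map to themselves.
        if symbol in prefixable_units:
            return ['%s%s' % (p, symbol) for p in unit_prefixes]
        return [symbol]

    expand_symbols('m')  # e.g. ['km', 'cm', 'mm'] (order may vary)
    expand_symbols('R')  # ['R'] -- Rankine takes no prefixes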
@@ -417,7 +422,7 @@
     yield assert_true, u2.dimensions == mass_density
     yield assert_true, u3.dimensions == mass_density
 
-    yield assert_allclose, get_conversion_factor(u1, u3), \
+    yield assert_allclose, get_conversion_factor(u1, u3)[0], \
         Msun_cgs / Mpc_cgs**3, 1e-12
 
 def test_is_code_unit():

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -27,7 +27,8 @@
 from numpy.testing import \
     assert_array_equal, \
     assert_equal, assert_raises, \
-    assert_array_almost_equal_nulp
+    assert_array_almost_equal_nulp, \
+    assert_array_almost_equal
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -418,7 +419,9 @@
     yield assert_equal, km_in_cm.in_mks(), 1e3
     yield assert_equal, km_in_cm.units, cm_unit
 
+    km_view = km.ndarray_view()
     km.convert_to_units('cm')
+    assert_true(km_view.base is km.base)
 
     yield assert_equal, km, YTQuantity(1, 'km')
     yield assert_equal, km.in_cgs(), 1e5
@@ -426,6 +429,7 @@
     yield assert_equal, km.units, cm_unit
 
     km.convert_to_units('kpc')
+    assert_true(km_view.base is km.base)
 
     yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')
     yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')
@@ -453,6 +457,49 @@
     yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
     yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
 
+def test_temperature_conversions():
+    """
+    Test conversions between various supported temperature scales.
+
+    Also ensure we only allow compound units with temperature
+    scales that have a proper zero point.
+
+    """
+    from yt.units.unit_object import InvalidUnitOperation
+
+    km = YTQuantity(1, 'km')
+    balmy = YTQuantity(300, 'K')
+    balmy_F = YTQuantity(80.33, 'F')
+    balmy_C = YTQuantity(26.85, 'C')
+    balmy_R = YTQuantity(540, 'R')
+
+    assert_array_almost_equal(balmy.in_units('F'), balmy_F)
+    assert_array_almost_equal(balmy.in_units('C'), balmy_C)
+    assert_array_almost_equal(balmy.in_units('R'), balmy_R)
+
+    balmy_view = balmy.ndarray_view()
+
+    balmy.convert_to_units('F')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+
+    balmy.convert_to_units('C')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)
+
+    balmy.convert_to_units('R')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)
+
+    balmy.convert_to_units('F')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+
+    yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km
+
+    # Does CGS conversion from F to K work?
+    yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
+
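
The expected values in this test follow from the standard scale
relations; checking them by hand in plain Python:

    T_K = 300.0
    T_C = T_K - 273.15       # 26.85 degrees Celsius
    T_R = T_K * 9.0 / 5.0    # 540.0 degrees Rankine
    T_F = T_R - 459.67       # 80.33 degrees Fahrenheit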
 
 def test_yt_array_yt_quantity_ops():
     """

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -19,7 +19,8 @@
     sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
     cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
-    boltzmann_constant_erg_per_K, rankine_per_kelvin
+    boltzmann_constant_erg_per_K, kelvin_per_rankine, \
+    speed_of_light_cm_per_s
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -38,6 +39,7 @@
     "erg":  (1.0, dimensions.energy),
     "esu":  (1.0, dimensions.charge),
     "gauss": (1.0, dimensions.magnetic_field),
+    "C" : (1.0, dimensions.temperature, -273.15),
 
     # some SI
     "m": (1.0e2, dimensions.length),
@@ -48,6 +50,8 @@
     # Imperial units
     "ft": (30.48, dimensions.length),
     "mile": (160934, dimensions.length),
+    "F": (kelvin_per_rankine, dimensions.temperature, -459.67),
+    "R": (kelvin_per_rankine, dimensions.temperature),
 
     # dimensionless stuff
     "h": (1.0, dimensions.dimensionless), # needs to be added for rho_crit_now
@@ -59,6 +63,9 @@
     "day": (sec_per_day, dimensions.time),
     "yr":  (sec_per_year, dimensions.time),
 
+    # Velocities
+    "c": (speed_of_light_cm_per_s, dimensions.velocity),
+
     # Solar units
     "Msun": (mass_sun_grams, dimensions.mass),
     "msun": (mass_sun_grams, dimensions.mass),
@@ -92,7 +99,6 @@
     "counts": (1.0, dimensions.dimensionless),
     "kB": (boltzmann_constant_erg_per_K,
            dimensions.energy/dimensions.temperature),
-    "R": (rankine_per_kelvin, dimensions.temperature),
     "photons": (1.0, dimensions.dimensionless),
 
     # for AstroPy compatibility
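
With this change a lookup-table entry is either a
(cgs_value, dimensions) pair or, for temperature scales with a shifted
zero point, a (cgs_value, dimensions, cgs_offset) triple. A sketch of
unpacking both forms (the helper name is hypothetical):

    def unpack_lut_entry(entry):
        # Two-tuple: purely multiplicative unit; three-tuple adds the
        # offset needed to reach a common zero point.
        if len(entry) == 2:
            cgs_value, dimensions = entry
            cgs_offset = 0.0
        else:
            cgs_value, dimensions, cgs_offset = entry
        return cgs_value, dimensions, cgs_offset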

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -21,7 +21,8 @@
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
-from yt.units import dimensions as dimensions_mod
+from yt.units.dimensions import \
+    base_dimensions, temperature
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
@@ -114,10 +115,11 @@
     is_number = False
 
     # Extra attributes
-    __slots__ = ["expr", "is_atomic", "cgs_value", "dimensions", "registry"]
+    __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
+                 "registry"]
 
-    def __new__(cls, unit_expr=sympy_one, cgs_value=None, dimensions=None,
-                registry=None, **assumptions):
+    def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
+                dimensions=None, registry=None, **assumptions):
         """
         Create a new unit. May be an atomic unit (like a gram) or combinations
         of atomic units (like g / cm**3).
@@ -130,7 +132,11 @@
             The unit's value in cgs.
         dimensions : sympy.core.expr.Expr
             A sympy expression representing the dimensionality of this unit.
-            It must contain only mass, length, time, temperature and angle symbols.
+            It must contain only mass, length, time, temperature and angle
+            symbols.
+        cgs_offset : float
+            The offset necessary to normalize temperature units to a common
+            zero point.
         registry : UnitRegistry object
             The unit registry we use to interpret unit symbols.
 
@@ -185,7 +191,12 @@
             validate_dimensions(dimensions)
         else:
             # lookup the unit symbols
-            cgs_value, dimensions = _get_unit_data_from_expr(unit_expr, registry.lut)
+            try:
+                cgs_value, dimensions = \
+                    _get_unit_data_from_expr(unit_expr, registry.lut)
+            except ValueError:
+                cgs_value, dimensions, cgs_offset = \
+                    _get_unit_data_from_expr(unit_expr, registry.lut)
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -194,6 +205,7 @@
         obj.expr = unit_expr
         obj.is_atomic = is_atomic
         obj.cgs_value = cgs_value
+        obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
 
@@ -240,24 +252,46 @@
     def __mul__(self, u):
         """ Multiply Unit with u (Unit object). """
         if not isinstance(u, Unit):
-            raise InvalidUnitOperation("Tried to multiply a Unit object with " \
-                                       "'%s' (type %s). This behavior is " \
-                                       "undefined." % (u, type(u)) )
+            raise InvalidUnitOperation("Tried to multiply a Unit object with "
+                                       "'%s' (type %s). This behavior is "
+                                       "undefined." % (u, type(u)))
+
+        cgs_offset = 0.0
+        if self.cgs_offset or u.cgs_offset:
+            if u.dimensions is temperature and self.is_dimensionless:
+                cgs_offset = u.cgs_offset
+            elif self.dimensions is temperature and u.is_dimensionless:
+                cgs_offset = self.cgs_offset
+            else:
+                raise InvalidUnitOperation("Quantities with units of Farhenheit "
+                                           "and Celcius cannot be multiplied.")
 
         return Unit(self.expr * u.expr,
                     cgs_value=(self.cgs_value * u.cgs_value),
+                    cgs_offset=cgs_offset,
                     dimensions=(self.dimensions * u.dimensions),
                     registry=self.registry)
 
     def __div__(self, u):
         """ Divide Unit by u (Unit object). """
         if not isinstance(u, Unit):
-            raise InvalidUnitOperation("Tried to divide a Unit object by '%s' "\
+            raise InvalidUnitOperation("Tried to divide a Unit object by '%s' "
                                        "(type %s). This behavior is "
-                                       "undefined." % (u, type(u)) )
+                                       "undefined." % (u, type(u)))
+
+        cgs_offset = 0.0
+        if self.cgs_offset or u.cgs_offset:
+            if u.dimensions is temperature and self.is_dimensionless:
+                cgs_offset = u.cgs_offset
+            elif self.dimensions is temperature and u.is_dimensionless:
+                cgs_offset = self.cgs_offset
+            else:
+                raise InvalidUnitOperation("Quantities with units of Fahrenheit "
+                                           "and Celsius cannot be divided.")
 
         return Unit(self.expr / u.expr,
                     cgs_value=(self.cgs_value / u.cgs_value),
+                    cgs_offset=cgs_offset,
                     dimensions=(self.dimensions / u.dimensions),
                     registry=self.registry)
 
@@ -294,10 +328,11 @@
             memodict = {}
         expr = str(self.expr)
         cgs_value = copy.deepcopy(self.cgs_value)
+        cgs_offset = copy.deepcopy(self.cgs_offset)
         dimensions = copy.deepcopy(self.dimensions)
         lut = copy.deepcopy(self.registry.lut)
         registry = UnitRegistry(lut=lut)
-        return Unit(expr, cgs_value, dimensions, registry)
+        return Unit(expr, cgs_value, cgs_offset, dimensions, registry)
 
     #
     # End unit operations
@@ -347,8 +382,8 @@
 
         """
         units_string = self._get_system_unit_string(mks_base_units)
-        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent()) /
-                     get_conversion_factor(self, units_string))
+        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
+                     get_conversion_factor(self, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
                     dimensions=self.dimensions, registry=self.registry)
 
@@ -383,41 +418,19 @@
     -------
     conversion_factor : float
         `old_units / new_units`
+    offset : float or None
+        The additive offset needed when converting between the old and
+        new unit, or None if neither unit carries an offset.
 
     """
-    # if args are not Unit objects, construct them
-    if not isinstance(old_units, Unit):
-        old_units = Unit(old_units)
-    if not isinstance(new_units, Unit):
-        new_units = Unit(new_units)
-
-    if not old_units.same_dimensions_as(new_units):
-        raise InvalidUnitOperation(
-            "Cannot convert from %s to %s because the dimensions do not "
-            "match: %s and %s" % (old_units, new_units, old_units.dimensions,
-                                  new_units.dimensions))
-
-    return old_units.cgs_value / new_units.cgs_value
-
-
-def convert_values(values, old_units, new_units):
-    """
-    Take data given in old units and convert to values in new units.
-
-    Parameters
-    ----------
-    values : array_like
-        The number or array we will convert.
-    old_units : str or Unit object
-        The units values are supplied in.
-    new_units : str or Unit object
-        The units values will be returned in.
-
-    Returns values in new units.
-
-    """
-    return values * get_conversion_factor(old_units, new_units)
-
+    ratio = old_units.cgs_value / new_units.cgs_value
+    if old_units.cgs_offset == 0 and new_units.cgs_offset == 0:
+        return (ratio, None)
+    else:
+        if old_units.dimensions is temperature:
+            return ratio, ratio*old_units.cgs_offset - new_units.cgs_offset
+        else:
+            raise InvalidUnitOperation("Fahrenheit and Celsius are only can "
+                                       "only be used to represent temperature.")
 
 #
 # Helper functions
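
Given the (ratio, offset) pair returned above, a value converts as
new = old * ratio - offset, with offset = ratio * old_offset -
new_offset. Checking 300 K -> Fahrenheit by hand, using the constants
from physical_ratios:

    kelvin_per_rankine = 5.0 / 9.0

    # K -> F: cgs_values are 1.0 (K) and kelvin_per_rankine (F);
    # cgs_offsets are 0.0 (K) and -459.67 (F).
    ratio = 1.0 / kelvin_per_rankine       # 1.8
    offset = ratio * 0.0 - (-459.67)       # 459.67
    print(300.0 * ratio - offset)          # 80.33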
@@ -444,7 +457,6 @@
         return _lookup_unit_symbol(str(unit_expr), unit_symbol_lut)
 
     if isinstance(unit_expr, Number):
-        # not sure if this should be (1, 1)...
         return (float(unit_expr), sympy_one)
 
     if isinstance(unit_expr, Pow):
@@ -515,7 +527,7 @@
         for dim in dimensions.args:
             validate_dimensions(dim)
     elif isinstance(dimensions, Symbol):
-        if dimensions not in dimensions_mod.base_dimensions:
+        if dimensions not in base_dimensions:
             raise UnitParseError("Dimensionality expression contains an "
                                  "unknown symbol '%s'." % dimensions)
     elif isinstance(dimensions, Pow):

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -414,10 +414,14 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        conversion_factor = self.units.get_conversion_factor(new_units)
+        (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
         self *= conversion_factor
+
+        if offset:
+            np.subtract(self, offset*self.uq, self)
+
         return self
 
     def convert_to_cgs(self):
@@ -450,11 +454,14 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        conversion_factor = self.units.get_conversion_factor(new_units)
+        (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         new_array = self * conversion_factor
         new_array.units = new_units
 
+        if offset:
+            np.subtract(new_array, offset*new_array.uq, new_array)
+
         return new_array
 
     def in_cgs(self):
@@ -672,7 +679,8 @@
     def __iadd__(self, other):
         """ See __add__. """
         oth = sanitize_units_add(self, other, "addition")
-        return np.add(self, oth, out=self)
+        np.add(self, oth, out=self)
+        return self
 
     def __sub__(self, right_object):
         """
@@ -691,7 +699,8 @@
     def __isub__(self, other):
         """ See __sub__. """
         oth = sanitize_units_add(self, other, "subtraction")
-        return np.subtract(self, oth, out=self)
+        np.subtract(self, oth, out=self)
+        return self
 
     def __neg__(self):
         """ Negate the data. """
@@ -718,7 +727,8 @@
     def __imul__(self, other):
         """ See __mul__. """
         oth = sanitize_units_mul(self, other)
-        return np.multiply(self, oth, out=self)
+        np.multiply(self, oth, out=self)
+        return self
 
     def __div__(self, right_object):
         """
@@ -736,7 +746,8 @@
     def __idiv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.divide(self, oth, out=self)
+        np.divide(self, oth, out=self)
+        return self
 
     def __truediv__(self, right_object):
         ro = sanitize_units_mul(self, right_object)
@@ -750,7 +761,8 @@
     def __itruediv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.true_divide(self, oth, out=self)
+        np.true_divide(self, oth, out=self)
+        return self
 
     def __floordiv__(self, right_object):
         ro = sanitize_units_mul(self, right_object)
@@ -764,7 +776,8 @@
     def __ifloordiv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.floor_divide(self, oth, out=self)
+        np.floor_divide(self, oth, out=self)
+        return self
 
     #Should these raise errors?  I need to come back and check this.
     def __or__(self, right_object):
@@ -774,7 +787,8 @@
         return YTArray(super(YTArray, self).__ror__(left_object))
 
     def __ior__(self, other):
-        return np.bitwise_or(self, other, out=self)
+        np.bitwise_or(self, other, out=self)
+        return self
 
     def __xor__(self, right_object):
         return YTArray(super(YTArray, self).__xor__(right_object))
@@ -783,7 +797,8 @@
         return YTArray(super(YTArray, self).__rxor__(left_object))
 
     def __ixor__(self, other):
-        return np.bitwise_xor(self, other, out=self)
+        np.bitwise_xor(self, other, out=self)
+        return self
 
     def __and__(self, right_object):
         return YTArray(super(YTArray, self).__and__(right_object))
@@ -792,7 +807,8 @@
         return YTArray(super(YTArray, self).__rand__(left_object))
 
     def __iand__(self, other):
-        return np.bitwise_and(self, other, out=self)
+        np.bitwise_and(self, other, out=self)
+        return self
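
Each of these in-place operators now performs the ufunc with out=self
and then returns self explicitly, so augmented assignment never rebinds
the name to a copy. The view-based regression tests added earlier in
this changeset check exactly that, roughly:

    from yt.units.yt_array import YTArray

    a = YTArray([1.0, 2.0], 'km')
    view = a.ndarray_view()
    a += YTArray([1.0, 1.0], 'km')
    # Same underlying buffer: the addition happened in place.
    assert view.base is a.base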
 
     def __pow__(self, power):
         """

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -6,7 +6,7 @@
 mass_hydrogen_cgs = 1.007947*amu_cgs
 
 # Velocities
-speed_of_light_cgs = YTQuantity(2.99792458e10, 'cm/s')
+speed_of_light_cgs = YTQuantity(speed_of_light_cm_per_s, 'cm/s')
 
 # Cross Sections
 # 8*pi/3 (alpha*hbar*c/(2*pi))**2
@@ -44,6 +44,7 @@
 qp = charge_proton_cgs
 mh = mp
 clight = speed_of_light_cgs
+speed_of_light = speed_of_light_cgs
 kboltz = boltzmann_constant_cgs
 kb = kboltz
 hcgs = planck_constant_cgs

diff -r 3e26d6ff17a6a33a550e8742774e783ddc51008b -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -67,6 +67,9 @@
 sec_per_min  = 60.0
 day_per_year = 365.25
 
+# velocities
+speed_of_light_cm_per_s = 2.99792458e10
+
 # temperature / energy
 boltzmann_constant_erg_per_K = 1.3806488e-16
 erg_per_eV = 1.602176562e-12
@@ -75,7 +78,7 @@
 keV_per_K = 1.0 / K_per_keV
 keV_per_erg = 1.0 / erg_per_keV
 eV_per_erg = 1.0 / erg_per_eV
-rankine_per_kelvin = 9./5.
+kelvin_per_rankine = 5./9.
 
 # Solar System masses
 # Standish, E.M. (1995) "Report of the IAU WGAS Sub-Group on Numerical Standards",


https://bitbucket.org/yt_analysis/yt/commits/608b8facdfe5/
Changeset:   608b8facdfe5
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-15 22:27:59
Summary:     Merging with mainline tip.
Affected #:  26 files

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
+  "signature": "sha256:ba8b6a53571695ae1d0c236ad43875823746e979a329a9d35ab0a8b899cebbba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "from yt.analysis_modules.ppv_cube.api import PPVCube"
      ],
      "language": "python",
      "metadata": {},

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
+  "signature": "sha256:4745a15abb6512547b50280b92c22567f89255189fd968ca706ef7c39d48024f"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -91,7 +91,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import SZProjection\n",
+      "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
       "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -35,7 +35,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import AbsorptionSpectrum
+  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
 

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -26,7 +26,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightRay
+  from yt.analysis_modules.cosmological_observation.api import LightRay
   lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
                 'Enzo', 0.0, 0.1)
 

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -386,7 +386,7 @@
    from yt.mods import *
    from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
    from yt.utilities.cosmology import Cosmology
-   from yt.analysis_modules.api import *
+   from yt.analysis_modules.photon_simulator.api import *
    import aplpy
 
    R = 1000. # in kpc

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -10,7 +10,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import CosmologySplice
+  from yt.analysis_modules.cosmological_observation.api import CosmologySplice
   my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
   my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out')
 

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/cookbook/boolean_data_objects.py
--- a/doc/source/cookbook/boolean_data_objects.py
+++ /dev/null
@@ -1,32 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-ds = yt.load("Enzo_64/DD0043/data0043")  # load data
-# Make a few data ojbects to start. Two boxes and two spheres.
-re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
-re2 = ds.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
-sp1 = ds.sphere([0.5, 0.5, 0.5], 0.05)
-sp2 = ds.sphere([0.1, 0.2, 0.3], 0.1)
-
-# The "AND" operator. This will make a region identical to re2.
-bool1 = ds.boolean([re1, "AND", re2])
-xp = bool1["particle_position_x"]
-
-# The "OR" operator. This will make a region identical to re1.
-bool2 = ds.boolean([re1, "OR", re2])
-
-# The "NOT" operator. This will make a region like re1, but with the corner
-# that re2 covers cut out.
-bool3 = ds.boolean([re1, "NOT", re2])
-
-# Disjoint regions can be combined with the "OR" operator.
-bool4 = ds.boolean([sp1, "OR", sp2])
-
-# Find oddly-shaped overlapping regions.
-bool5 = ds.boolean([re2, "AND", sp1])
-
-# Nested logic with parentheses.
-# This is re1 with the oddly-shaped region cut out.
-bool6 = ds.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc doc/source/cookbook/constructing_data_objects.rst
--- a/doc/source/cookbook/constructing_data_objects.rst
+++ b/doc/source/cookbook/constructing_data_objects.rst
@@ -16,16 +16,6 @@
 
 .. yt_cookbook:: find_clumps.py
 
-Boolean Data Objects
-~~~~~~~~~~~~~~~~~~~~
-
-Below shows the creation of a number of boolean data objects, which are built
-upon previously-defined data objects. The boolean data objects can be used like
-any other, except for a few cases.  Please see :ref:`boolean_data_objects` for
-more information.
-
-.. yt_cookbook:: boolean_data_objects.py
-
 Extracting Fixed Resolution Data
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""
-API for yt.analysis_modules
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .absorption_spectrum.api import \
-    AbsorptionSpectrum
-
-from .coordinate_transformation.api import \
-    spherical_regrid
-
-from .cosmological_observation.api import \
-    CosmologySplice, \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones, \
-    LightRay
-
-from .halo_finding.api import \
-    Halo, \
-    HOPHalo, \
-    parallelHOPHalo, \
-    LoadedHalo, \
-    FOFHalo, \
-    HaloList, \
-    HOPHaloList, \
-    FOFHaloList, \
-    parallelHOPHaloList, \
-    LoadedHaloList, \
-    GenericHaloFinder, \
-    parallelHF, \
-    HOPHaloFinder, \
-    FOFHaloFinder, \
-    HaloFinder, \
-    LoadHaloes
-
-from .halo_mass_function.api import \
-    HaloMassFcn, \
-    TransferFunction, \
-    integrate_inf
-
-from .halo_merger_tree.api import \
-    DatabaseFunctions, \
-    MergerTree, \
-    MergerTreeConnect, \
-    Node, \
-    Link, \
-    MergerTreeDotOutput, \
-    MergerTreeTextOutput
-
-from .halo_profiler.api import \
-    VirialFilter, \
-    HaloProfiler, \
-    FakeProfile
-
-from .level_sets.api import \
-    identify_contours, \
-    Clump, \
-    find_clumps, \
-    get_lowest_clumps, \
-    write_clump_index, \
-    write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
-    _DistanceToMainClump, \
-    recursive_all_clumps, \
-    return_all_clumps, \
-    return_bottom_clumps, \
-    recursive_bottom_clumps, \
-    clump_list_sort
-
-from .radial_column_density.api import \
-    RadialColumnDensity
-
-from .spectral_integrator.api import \
-     add_xray_emissivity_field
-
-from .star_analysis.api import \
-    StarFormationRate, \
-    SpectrumBuilder
-
-from .two_point_functions.api import \
-    TwoPointFunctions, \
-    FcnSet
-
-from .sunyaev_zeldovich.api import SZProjection
-
-from .radmc3d_export.api import \
-    RadMC3DWriter
-
-from .particle_trajectories.api import \
-    ParticleTrajectories
-
-from .photon_simulator.api import \
-     PhotonList, \
-     EventList, \
-     SpectralModel, \
-     XSpecThermalModel, \
-     XSpecAbsorbModel, \
-     TableApecModel, \
-     TableAbsorbModel, \
-     PhotonModel, \
-     ThermalPhotonModel
-
-from .ppv_cube.api import \
-    PPVCube

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/coordinate_transformation/api.py
--- a/yt/analysis_modules/coordinate_transformation/api.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-API for coordinate_transformation
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .transforms import \
-    spherical_regrid

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/coordinate_transformation/setup.py
--- a/yt/analysis_modules/coordinate_transformation/setup.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('coordinate_transformation',
-        parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""
-Transformations between coordinate systems
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.funcs import *
-
-from yt.utilities.linear_interpolators import \
-    TrilinearFieldInterpolator
-
-def spherical_regrid(pf, nr, ntheta, nphi, rmax, fields,
-                     center=None, smoothed=True):
-    """
-    This function takes a parameter file (*pf*) along with the *nr*, *ntheta*
-    and *nphi* points to generate out to *rmax*, and it grids *fields* onto
-    those points and returns a dict.  *center* if supplied will be the center,
-    otherwise the most dense point will be chosen.  *smoothed* governs whether
-    regular covering grids or smoothed covering grids will be used.
-    """
-    mylog.warning("This code may produce some artifacts of interpolation")
-    mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
-    if center is None: center = pf.h.find_max("Density")[1]
-    fields = ensure_list(fields)
-    r,theta,phi = np.mgrid[0:rmax:nr*1j,
-                           0:np.pi:ntheta*1j,
-                           0:2*np.pi:nphi*1j]
-    new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
-    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
-    new_grid['z'] = r*np.cos(theta)             + center[2]
-    sphere = pf.sphere(center, rmax)
-    return arbitrary_regrid(new_grid, sphere, fields, smoothed)
-
-def arbitrary_regrid(new_grid, data_source, fields, smoothed=True):
-    """
-    This function accepts a dict of points 'x', 'y' and 'z' and a data source
-    from which to interpolate new points, along with a list of fields it needs
-    to regrid onto those xyz points.  It then returns interpolated points.
-    This has not been well-tested other than for regular spherical regridding.
-    """
-    fields = ensure_list(fields)
-    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
-    for field in fields:
-        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = np.argsort(data_source.grid_levels[:,0])
-    ng = len(data_source._grids)
-
-    for i,grid in enumerate(data_source._grids[grid_order][::-1]):
-        mylog.info("Regridding grid % 4i / % 4i (%s - %s)", i, ng, grid.id, grid.Level)
-        cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
-
-        # makes x0,x1,y0,y1,z0,z1
-        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
-
-        
-        # Now we figure out which of our points are inside this grid
-        # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
-        for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
-            # &= does a logical_and on the array
-            point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
-                         & ( new_grid[ax]     <= grid.RightEdge[i] ) )
-        point_ind &= (new_grid['handled'] == False) # only want unhandled points
-
-        # If we don't have any, we can just leave
-        if point_ind.sum() == 0: continue
-
-        # because of the funky way the interpolator takes points, we have to make a
-        # new dict of just the points inside this grid
-        point_grid = {'x' : new_grid['x'][point_ind],
-                      'y' : new_grid['y'][point_ind],
-                      'z' : new_grid['z'][point_ind]}
-
-        # Now we know which of the points in new_grid are inside this grid
-        for field in fields:
-            interpolator = TrilinearFieldInterpolator(
-                cg[field],bounds,['x','y','z'])
-            new_grid[field][point_ind] = interpolator(point_grid)
-
-        new_grid['handled'][point_ind] = True
-
-    mylog.info("Finished with %s dangling points",
-        new_grid['handled'].size - new_grid['handled'].sum())
-
-    return new_grid
-
-"""
-# The following will work to plot through different slices:
-
-import pylab
-for i in range(n_theta):
-    print "Doing % 3i / % 3i" % (i, n_theta)
-    pylab.clf()
-    ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
-    ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  np.log10(sph_grid[field][:,i,:]))
-    pylab.savefig("polar/latitude_%03i.png" % i)
-
-for i in range(n_phi):
-    print "Doing % 3i / % 3i" % (i, n_phi)
-    pylab.clf()
-    ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
-    ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  np.log10(sph_grid[field][:,:,i]))
-    pylab.savefig("polar/longitude_%03i.png" % i)
-"""

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -55,32 +55,37 @@
           my_slice["projection_center"][my_slice["projection_axis"]] \
             + 0.5 * my_slice["box_depth_fraction"]
         if (depthLeft < 0):
-            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
-              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+            cut_mask = ("((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= 0) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= %f)) | " + \
+              "((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= 1))") % \
                 (axis, axis, axis, axis, depthRight, 
                  axis, axis, (depthLeft+1), axis, axis)
         elif (depthRight > 1):
-            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
-              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+            cut_mask = ("((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= 0) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= %f)) | " + \
+              "((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= 1))") % \
                 (axis, axis, axis, axis, (depthRight-1),
                  axis, axis, depthLeft, axis, axis)
         else:
-            cut_mask = ("(grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"%s\"] <= %f)") % \
+            cut_mask = ("(obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"%s\"] <= %f)") % \
               (axis, axis, depthLeft, axis, axis, depthRight)
 
         these_field_cuts.append(cut_mask)
 
+    data_source = my_slice["object"].all_data()
+    cut_region = data_source.cut_region(these_field_cuts)
+        
     # Make projection.
     proj = my_slice["object"].proj(field, my_slice["projection_axis"], 
         weight_field, center=region_center,
-        field_parameters=dict(field_cuts=these_field_cuts))
+        data_source=cut_region)
     proj_field = proj.field[0]
 
+    del data_source, cut_region
+    
     # 2. The Tile Problem
     # Tile projection to specified width.
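
The field cuts above now reference the data object as "obj", matching
the string conditionals that cut_region expects; a short usage sketch
(the dataset path is hypothetical):

    import yt

    ds = yt.load("enzo_tiny_cosmology/32Mpc_32.enzo")
    ad = ds.all_data()
    # Conditionals name the object being filtered "obj".
    slab = ad.cut_region(['(obj["z"] + 0.5*obj["dz"] >= 0.2) & '
                          '(obj["z"] - 0.5*obj["dz"] <= 0.8)'])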
 

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-API for halo_profiler
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .halo_filters import \
-    VirialFilter
-
-from .multi_halo_profiler import \
-    HaloProfiler, \
-    FakeProfile, \
-    standard_fields

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-HaloProfiler re-centering functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-
-from yt.fields.local_fields import \
-    add_field
-
-centering_registry = {}
-
-def add_function(name):
-   def wrapper(func):
-       centering_registry[name] = func
-       return func
-   return wrapper
-
-#### Dark Matter Density ####
-
- at add_function("Min_Dark_Matter_Density")
-def find_minimum_dm_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("Max_Dark_Matter_Density")
-def find_maximum_dm_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("CoM_Dark_Matter_Density")
-def find_CoM_dm_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False, 
-                                                      use_particles=True,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Gas Density ####
-
- at add_function("Min_Gas_Density")
-def find_minimum_gas_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("Max_Gas_Density")
-def find_maximum_gas_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("CoM_Gas_Density")
-def find_CoM_gas_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
-                                                      use_particles=False,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Total Density ####
-
- at add_function("Min_Total_Density")
-def find_minimum_total_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("Max_Total_Density")
-def find_maximum_total_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("CoM_Total_Density")
-def find_CoM_total_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
-                                                      use_particles=True,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Temperature ####
-
- at add_function("Min_Temperature")
-def find_minimum_temperature(data):
-    ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
-                                                              preload=False)
-    return (mx, my, mz)
-
- at add_function("Max_Temperature")
-def find_maximum_temperature(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
-                                                              preload=False)
-    return (mx, my, mz)
-

diff -r 0fe6c382c01525552be57eb2df8e9e0f1acfeda0 -r 608b8facdfe50c94d317ad6667238000646ffedc yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-Halo filters to be used with the HaloProfiler.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from copy import deepcopy
-import numpy as np
-
-from yt.funcs import *
-from yt.utilities.physical_constants import TINY
-
-def VirialFilter(profile, overdensity_field='ActualOverdensity',
-                 virial_overdensity=200., must_be_virialized=True,
-                 virial_filters=[['TotalMassMsun', '>=','1e14']],
-                 virial_quantities=['TotalMassMsun', 'RadiusMpc'],
-                 virial_index=None, use_log=False):
-    r"""Filter halos by virial quantities.
-    
-    Return values are a True or False whether the halo passed the filter, 
-    along with a dictionary of virial quantities for the fields specified in 
-    the virial_quantities keyword.  Thresholds for virial quantities are 
-    given with the virial_filters keyword in the following way:
-    [field, condition, value].
-    
-    This is typically used as part of a call to `add_halo_filter`.
-    
-    Parameters
-    ----------
-    overdensity_field : string
-        The field used for interpolation with the 
-        specified critical value given with 'virial_overdensity'.  
-        Default='ActualOverdensity'.
-    virial_overdensity : float
-        The value used to determine the outer radius of the virialized halo.
-        Default: 200.
-    must_be_virialized : bool
-        If no values in the profile are above the 
-        value of virial_overdensity, the halo does not pass the filter.  
-        Default: True.
-    virial_filters : array_like
-        Conditional filters based on virial quantities 
-        given in the following way: [field, condition, value].  
-        Default: [['TotalMassMsun', '>=','1e14']].
-    virial_quantities : array_like
-        Fields for which interpolated values should 
-        be calculated and returned.  Default: ['TotalMassMsun', 'RadiusMpc'].
-    virial_index : array_like
-        If given as a list, the index of the radial profile 
-        which is used for interpolation is placed here.  Default: None.
-    use_log : bool
-        If True, interpolation is done in log space.  
-        Default: False.
-    
-    Examples
-    --------
-    >>> hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True,
-                   overdensity_field='ActualOverdensity',
-                   virial_overdensity=200,
-                   virial_filters=[['TotalMassMsun','>=','1e14']],
-                   virial_quantities=['TotalMassMsun','RadiusMpc'])
-    
-    """
-
-    fields = deepcopy(virial_quantities)
-    if virial_filters is None: virial_filters = []
-    for vfilter in virial_filters:
-        if not vfilter[0] in fields:
-            fields.append(vfilter[0])
-    
-    overDensity = []
-    temp_profile = dict((field, []) for field in fields)
-
-    for q in range(len(profile[overdensity_field])):
-        good = True
-        if (profile[overdensity_field][q] != profile[overdensity_field][q]):
-            good = False
-            continue
-        for field in fields:
-            if (profile[field][q] != profile[field][q]):
-                good = False
-                break
-        if good:
-            overDensity.append(profile[overdensity_field][q])
-            for field in fields:
-                temp_profile[field].append(profile[field][q])
-
-    if use_log:
-        for field in temp_profile.keys():
-            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
-                                                   max(temp_profile[field])))
-
-    virial = dict((field, 0.0) for field in fields)
-
-    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
-            must_be_virialized:
-        mylog.debug("This halo is not virialized!")
-        return [False, {}]
-
-    if (len(overDensity) < 2):
-        mylog.debug("Skipping halo with no valid points in profile.")
-        return [False, {}]
-
-    if (overDensity[1] <= virial_overdensity):
-        index = 0
-    elif (overDensity[-1] >= virial_overdensity):
-        index = -2
-    else:
-        for q in (np.arange(len(overDensity),0,-1)-1):
-            if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
-                index = q - 1
-                break
-
-    if type(virial_index) is list:
-        virial_index.append(index)
-
-    for field in fields:
-        if (overDensity[index+1] - overDensity[index]) == 0:
-            mylog.debug("Overdensity profile has slope of zero.")
-            return [False, {}]
-        else:
-            slope = (temp_profile[field][index+1] - temp_profile[field][index]) / \
-                (overDensity[index+1] - overDensity[index])
-            value = slope * (virial_overdensity - overDensity[index]) + \
-                temp_profile[field][index]
-            virial[field] = value
-
-    if use_log:
-        for field in virial.keys():
-            virial[field] = np.power(10, virial[field])
-
-    for vfilter in virial_filters:
-        if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):
-            mylog.debug("(%s %s %s) returned True for %s." % \
-                            (vfilter[0],vfilter[1],vfilter[2],virial[vfilter[0]]))
-            continue
-        else:
-            mylog.debug("(%s %s %s) returned False for %s." % \
-                            (vfilter[0],vfilter[1],vfilter[2],virial[vfilter[0]]))
-            return [False, {}]
-
-    return [True, dict((("%s_%s" % (q, virial_overdensity)), virial[q])
-                       for q in virial_quantities)]
-

This diff is so big that we needed to truncate the remainder.

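The VirialFilter code removed above boils down to one operation: find the
radial bin where the overdensity profile crosses the requested threshold,
then linearly interpolate each requested field at that crossing. A minimal
standalone sketch of that step, using only NumPy (the function and variable
names here are illustrative, not part of yt's API):

    import numpy as np

    def interpolate_at_overdensity(overdensity, field_values, threshold):
        """Linearly interpolate field_values where the (decreasing)
        overdensity profile crosses threshold."""
        above = np.where(overdensity >= threshold)[0]
        if len(above) == 0:
            return None  # never virialized at this threshold
        # index of the bin just inside the crossing, clipped so that
        # index + 1 is always valid
        i = min(above[-1], len(overdensity) - 2)
        d_od = overdensity[i + 1] - overdensity[i]
        if d_od == 0:
            return None  # flat segment: the slope is undefined
        slope = (field_values[i + 1] - field_values[i]) / d_od
        return slope * (threshold - overdensity[i]) + field_values[i]

    # e.g. enclosed mass interpolated at an overdensity of 200:
    od = np.array([1500.0, 800.0, 350.0, 180.0, 90.0])
    mass = np.array([1e13, 4e13, 8e13, 1.2e14, 1.5e14])
    print(interpolate_at_overdensity(od, mass, 200.0))  # ~1.15e14
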
https://bitbucket.org/yt_analysis/yt/commits/9b4821c404ce/
Changeset:   9b4821c404ce
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-17 23:13:21
Summary:     Fixing typos and grammar issues.
Affected #:  1 file

diff -r 608b8facdfe50c94d317ad6667238000646ffedc -r 9b4821c404ce98620c3e3f0a5d7704bfa12147a8 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -263,7 +263,7 @@
             elif self.dimensions is temperature and u.is_dimensionless:
                 cgs_offset = self.cgs_offset
             else:
-                raise InvalidUnitOperation("Quantities with units of Farhenheit "
+                raise InvalidUnitOperation("Quantities with units of Fahrenheit "
                                            "and Celcius cannot be multiplied.")
 
         return Unit(self.expr * u.expr,
@@ -429,8 +429,9 @@
         if old_units.dimensions is temperature:
             return ratio, ratio*old_units.cgs_offset - new_units.cgs_offset
         else:
-            raise InvalidUnitOperation("Fahrenheit and Celsius are only can "
-                                       "only be used to represent temperature.")
+            raise InvalidUnitOperation(
+                "Fahrenheit and Celsius are not absoulte temperature scales "
+                "and cannot be used in compound unit symbols.")
 
 #
 # Helper functions

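The reworded message states the real restriction: Fahrenheit and Celsius
are affine scales with a shifted zero point, so compound symbols such as
erg/F have no well-defined meaning. Assuming the yt-3.0 unit machinery
introduced in the pull request below, the failure mode looks roughly like
this (a sketch, not output copied from a session):

    from yt.units.yt_array import YTQuantity
    from yt.units.unit_object import InvalidUnitOperation

    t = YTQuantity(80.33, 'F')
    t.in_units('K')              # a pure temperature conversion is fine
    try:
        t * YTQuantity(1, 'km')  # compound units with F are rejected
    except InvalidUnitOperation as err:
        print(err)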

https://bitbucket.org/yt_analysis/yt/commits/b58796ca1988/
Changeset:   b58796ca1988
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-17 23:14:53
Summary:     Merged in ngoldbaum/yt/yt-3.0 (pull request #993)

Adding Boltzmann's constant, speed of light, Fahrenheit, Celsius, and Rankine units.
Affected #:  7 files

diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -31,7 +31,8 @@
 # classes
 from yt.units.unit_object import Unit, UnitParseError
 # objects
-from yt.units.unit_lookup_table import default_unit_symbol_lut, unit_prefixes
+from yt.units.unit_lookup_table import \
+    default_unit_symbol_lut, unit_prefixes, prefixable_units
 # unit definitions
 from yt.utilities.physical_constants import \
     cm_per_pc, sec_per_year, cm_per_km, cm_per_mpc, \
@@ -46,13 +47,17 @@
 
     # go through all possible prefix combos
     for symbol in default_unit_symbol_lut.keys():
-        for prefix in unit_prefixes.keys():
+        if symbol in prefixable_units:
+            keys = unit_prefixes.keys()
+        else:
+            keys = [symbol]
+        for prefix in keys:
             new_symbol = "%s%s" % (prefix, symbol)
 
             # test if we have seen this symbol
             if new_symbol in full_set:
                 print "Duplicate symbol: %s" % new_symbol
-                yield assert_true, False
+                raise RuntimeError
 
             full_set.add(new_symbol)
     yield assert_true, True
@@ -417,7 +422,7 @@
     yield assert_true, u2.dimensions == mass_density
     yield assert_true, u3.dimensions == mass_density
 
-    yield assert_allclose, get_conversion_factor(u1, u3), \
+    yield assert_allclose, get_conversion_factor(u1, u3)[0], \
         Msun_cgs / Mpc_cgs**3, 1e-12
 
 def test_is_code_unit():

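The revised loop only expands SI prefixes for symbols registered in
prefixable_units, which is what keeps deliberate two-letter symbols such
as the new "kB" (Boltzmann's constant) from being flagged as prefixed
collisions. A compressed sketch of the collision check (the names mirror
the test's imports, but the snippet is illustrative):

    full_set = set()
    for symbol in default_unit_symbol_lut:
        # only prefixable symbols get the full milli/kilo/... expansion;
        # an empty prefix stands in for the bare symbol
        prefixes = list(unit_prefixes) if symbol in prefixable_units else ['']
        for prefix in prefixes:
            new_symbol = "%s%s" % (prefix, symbol)
            if new_symbol in full_set:
                raise RuntimeError("Duplicate symbol: %s" % new_symbol)
            full_set.add(new_symbol)
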
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -27,7 +27,8 @@
 from numpy.testing import \
     assert_array_equal, \
     assert_equal, assert_raises, \
-    assert_array_almost_equal_nulp
+    assert_array_almost_equal_nulp, \
+    assert_array_almost_equal
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -418,7 +419,9 @@
     yield assert_equal, km_in_cm.in_mks(), 1e3
     yield assert_equal, km_in_cm.units, cm_unit
 
+    km_view = km.ndarray_view()
     km.convert_to_units('cm')
+    assert_true(km_view.base is km.base)
 
     yield assert_equal, km, YTQuantity(1, 'km')
     yield assert_equal, km.in_cgs(), 1e5
@@ -426,6 +429,7 @@
     yield assert_equal, km.units, cm_unit
 
     km.convert_to_units('kpc')
+    assert_true(km_view.base is km.base)
 
     yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')
     yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')
@@ -453,6 +457,49 @@
     yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
     yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
 
+def test_temperature_conversions():
+    """
+    Test conversions between various supported temperature scales.
+
+    Also ensure we only allow compound units with temperature
+    scales that have a proper zero point.
+
+    """
+    from yt.units.unit_object import InvalidUnitOperation
+
+    km = YTQuantity(1, 'km')
+    balmy = YTQuantity(300, 'K')
+    balmy_F = YTQuantity(80.33, 'F')
+    balmy_C = YTQuantity(26.85, 'C')
+    balmy_R = YTQuantity(540, 'R')
+
+    assert_array_almost_equal(balmy.in_units('F'), balmy_F)
+    assert_array_almost_equal(balmy.in_units('C'), balmy_C)
+    assert_array_almost_equal(balmy.in_units('R'), balmy_R)
+
+    balmy_view = balmy.ndarray_view()
+
+    balmy.convert_to_units('F')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+
+    balmy.convert_to_units('C')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)
+
+    balmy.convert_to_units('R')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)
+
+    balmy.convert_to_units('F')
+    yield assert_true, balmy_view.base is balmy.base
+    yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
+
+    yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km
+
+    # Does CGS conversion from F to K work?
+    yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
+
 
 def test_yt_array_yt_quantity_ops():
     """

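The 300 K reference point used throughout the new test can be checked by
hand against the conversion constants added later in this changeset
(kelvin_per_rankine = 5/9, i.e. 9/5 degrees Rankine per Kelvin):

    # 300 K expressed on each supported scale:
    #   R: 300 * 9/5          = 540.00
    #   F: 300 * 9/5 - 459.67 =  80.33
    #   C: 300 - 273.15       =  26.85
    from yt.units.yt_array import YTQuantity
    balmy = YTQuantity(300, 'K')
    for scale in ('R', 'F', 'C'):
        print(balmy.in_units(scale))
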
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -18,7 +18,9 @@
     mass_sun_grams, sec_per_year, sec_per_day, sec_per_hr, \
     sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
-    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams
+    cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
+    boltzmann_constant_erg_per_K, kelvin_per_rankine, \
+    speed_of_light_cm_per_s
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -37,6 +39,7 @@
     "erg":  (1.0, dimensions.energy),
     "esu":  (1.0, dimensions.charge),
     "gauss": (1.0, dimensions.magnetic_field),
+    "C" : (1.0, dimensions.temperature, -273.15),
 
     # some SI
     "m": (1.0e2, dimensions.length),
@@ -47,6 +50,8 @@
     # Imperial units
     "ft": (30.48, dimensions.length),
     "mile": (160934, dimensions.length),
+    "F": (kelvin_per_rankine, dimensions.temperature, -459.67),
+    "R": (kelvin_per_rankine, dimensions.temperature),
 
     # dimensionless stuff
     "h": (1.0, dimensions.dimensionless), # needs to be added for rho_crit_now
@@ -58,6 +63,9 @@
     "day": (sec_per_day, dimensions.time),
     "yr":  (sec_per_year, dimensions.time),
 
+    # Velocities
+    "c": (speed_of_light_cm_per_s, dimensions.velocity),
+
     # Solar units
     "Msun": (mass_sun_grams, dimensions.mass),
     "msun": (mass_sun_grams, dimensions.mass),
@@ -89,6 +97,8 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
+    "kB": (boltzmann_constant_erg_per_K,
+           dimensions.energy/dimensions.temperature),
     "photons": (1.0, dimensions.dimensionless),
 
     # for AstroPy compatibility

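Note the lookup-table convention this hunk introduces: entries for affine
temperature scales carry an optional third element, the scale's zero point
in its own degrees, while absolute scales ("K", "R") keep the plain
(cgs_value, dimensions) form. The numbers are easy to sanity-check:

    kelvin_per_rankine = 5.0 / 9.0
    # 0 degC = 273.15 K and 0 degF = 459.67 degR, so a balmy 80.33 degF
    # should land back on the 300 K point used in the tests:
    assert abs(80.33 * kelvin_per_rankine
               + 459.67 * kelvin_per_rankine - 300.0) < 1e-2
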
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -21,7 +21,8 @@
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
-from yt.units import dimensions as dimensions_mod
+from yt.units.dimensions import \
+    base_dimensions, temperature
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
@@ -114,10 +115,11 @@
     is_number = False
 
     # Extra attributes
-    __slots__ = ["expr", "is_atomic", "cgs_value", "dimensions", "registry"]
+    __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
+                 "registry"]
 
-    def __new__(cls, unit_expr=sympy_one, cgs_value=None, dimensions=None,
-                registry=None, **assumptions):
+    def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
+                dimensions=None, registry=None, **assumptions):
         """
         Create a new unit. May be an atomic unit (like a gram) or combinations
         of atomic units (like g / cm**3).
@@ -130,7 +132,11 @@
             The unit's value in cgs.
         dimensions : sympy.core.expr.Expr
             A sympy expression representing the dimensionality of this unit.
-            It must contain only mass, length, time, temperature and angle symbols.
+            It must contain only mass, length, time, temperature and angle
+            symbols.
+        cgs_offset : float
+            The offset necessary to normalize temperature units to a common
+            zero point.
         registry : UnitRegistry object
             The unit registry we use to interpret unit symbols.
 
@@ -185,7 +191,12 @@
             validate_dimensions(dimensions)
         else:
             # lookup the unit symbols
-            cgs_value, dimensions = _get_unit_data_from_expr(unit_expr, registry.lut)
+            try:
+                cgs_value, dimensions = \
+                    _get_unit_data_from_expr(unit_expr, registry.lut)
+            except ValueError:
+                cgs_value, dimensions, cgs_offset = \
+                    _get_unit_data_from_expr(unit_expr, registry.lut)
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -194,6 +205,7 @@
         obj.expr = unit_expr
         obj.is_atomic = is_atomic
         obj.cgs_value = cgs_value
+        obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
 
@@ -240,24 +252,46 @@
     def __mul__(self, u):
         """ Multiply Unit with u (Unit object). """
         if not isinstance(u, Unit):
-            raise InvalidUnitOperation("Tried to multiply a Unit object with " \
-                                       "'%s' (type %s). This behavior is " \
-                                       "undefined." % (u, type(u)) )
+            raise InvalidUnitOperation("Tried to multiply a Unit object with "
+                                       "'%s' (type %s). This behavior is "
+                                       "undefined." % (u, type(u)))
+
+        cgs_offset = 0.0
+        if self.cgs_offset or u.cgs_offset:
+            if u.dimensions is temperature and self.is_dimensionless:
+                cgs_offset = u.cgs_offset
+            elif self.dimensions is temperature and u.is_dimensionless:
+                cgs_offset = self.cgs_offset
+            else:
+                raise InvalidUnitOperation("Quantities with units of Fahrenheit "
+                                           "and Celcius cannot be multiplied.")
 
         return Unit(self.expr * u.expr,
                     cgs_value=(self.cgs_value * u.cgs_value),
+                    cgs_offset=cgs_offset,
                     dimensions=(self.dimensions * u.dimensions),
                     registry=self.registry)
 
     def __div__(self, u):
         """ Divide Unit by u (Unit object). """
         if not isinstance(u, Unit):
-            raise InvalidUnitOperation("Tried to divide a Unit object by '%s' "\
+            raise InvalidUnitOperation("Tried to divide a Unit object by '%s' "
                                        "(type %s). This behavior is "
-                                       "undefined." % (u, type(u)) )
+                                       "undefined." % (u, type(u)))
+
+        cgs_offset = 0.0
+        if self.cgs_offset or u.cgs_offset:
+            if u.dimensions is temperature and self.is_dimensionless:
+                cgs_offset = u.cgs_offset
+            elif self.dimensions is temperature and u.is_dimensionless:
+                cgs_offset = self.cgs_offset
+            else:
+                raise InvalidUnitOperation("Quantities with units of Farhenheit "
+                                           "and Celcius cannot be multiplied.")
 
         return Unit(self.expr / u.expr,
                     cgs_value=(self.cgs_value / u.cgs_value),
+                    cgs_offset=cgs_offset,
                     dimensions=(self.dimensions / u.dimensions),
                     registry=self.registry)
 
@@ -294,10 +328,11 @@
             memodict = {}
         expr = str(self.expr)
         cgs_value = copy.deepcopy(self.cgs_value)
+        cgs_offset = copy.deepcopy(self.cgs_offset)
         dimensions = copy.deepcopy(self.dimensions)
         lut = copy.deepcopy(self.registry.lut)
         registry = UnitRegistry(lut=lut)
-        return Unit(expr, cgs_value, dimensions, registry)
+        return Unit(expr, cgs_value, cgs_offset, dimensions, registry)
 
     #
     # End unit operations
@@ -347,8 +382,8 @@
 
         """
         units_string = self._get_system_unit_string(mks_base_units)
-        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent()) /
-                     get_conversion_factor(self, units_string))
+        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
+                     get_conversion_factor(self, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
                     dimensions=self.dimensions, registry=self.registry)
 
@@ -383,41 +418,20 @@
     -------
     conversion_factor : float
         `old_units / new_units`
+    offset : float or None
+        Offset between the old unit and new unit.
 
     """
-    # if args are not Unit objects, construct them
-    if not isinstance(old_units, Unit):
-        old_units = Unit(old_units)
-    if not isinstance(new_units, Unit):
-        new_units = Unit(new_units)
-
-    if not old_units.same_dimensions_as(new_units):
-        raise InvalidUnitOperation(
-            "Cannot convert from %s to %s because the dimensions do not "
-            "match: %s and %s" % (old_units, new_units, old_units.dimensions,
-                                  new_units.dimensions))
-
-    return old_units.cgs_value / new_units.cgs_value
-
-
-def convert_values(values, old_units, new_units):
-    """
-    Take data given in old units and convert to values in new units.
-
-    Parameters
-    ----------
-    values : array_like
-        The number or array we will convert.
-    old_units : str or Unit object
-        The units values are supplied in.
-    new_units : str or Unit object
-        The units values will be returned in.
-
-    Returns values in new units.
-
-    """
-    return values * get_conversion_factor(old_units, new_units)
-
+    ratio = old_units.cgs_value / new_units.cgs_value
+    if old_units.cgs_offset == 0 and new_units.cgs_offset == 0:
+        return (ratio, None)
+    else:
+        if old_units.dimensions is temperature:
+            return ratio, ratio*old_units.cgs_offset - new_units.cgs_offset
+        else:
+            raise InvalidUnitOperation(
+                "Fahrenheit and Celsius are not absoulte temperature scales "
+                "and cannot be used in compound unit symbols.")
 
 #
 # Helper functions
@@ -444,7 +458,6 @@
         return _lookup_unit_symbol(str(unit_expr), unit_symbol_lut)
 
     if isinstance(unit_expr, Number):
-        # not sure if this should be (1, 1)...
         return (float(unit_expr), sympy_one)
 
     if isinstance(unit_expr, Pow):
@@ -515,7 +528,7 @@
         for dim in dimensions.args:
             validate_dimensions(dim)
     elif isinstance(dimensions, Symbol):
-        if dimensions not in dimensions_mod.base_dimensions:
+        if dimensions not in base_dimensions:
             raise UnitParseError("Dimensionality expression contains an "
                                  "unknown symbol '%s'." % dimensions)
     elif isinstance(dimensions, Pow):

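The (ratio, offset) pair returned by the rewritten get_conversion_factor
drives both convert_to_units and in_units: the new value is
old * ratio - offset, with offset = ratio * old_offset - new_offset.
Worked by hand for K -> F, using the table entries above (cgs_value 1.0
and offset 0 for K; cgs_value 5/9 and offset -459.67 for F):

    ratio = 1.0 / (5.0 / 9.0)          # = 1.8
    offset = ratio * 0.0 - (-459.67)   # = 459.67
    value_F = 300.0 * ratio - offset   # = 540.0 - 459.67 = 80.33

which matches the balmy_F fixture in the tests.
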
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -414,10 +414,14 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        conversion_factor = self.units.get_conversion_factor(new_units)
+        (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
         self *= conversion_factor
+
+        if offset:
+            np.subtract(self, offset*self.uq, self)
+
         return self
 
     def convert_to_cgs(self):
@@ -450,11 +454,14 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        conversion_factor = self.units.get_conversion_factor(new_units)
+        (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         new_array = self * conversion_factor
         new_array.units = new_units
 
+        if offset:
+            np.subtract(new_array, offset*new_array.uq, new_array)
+
         return new_array
 
     def in_cgs(self):
@@ -672,7 +679,8 @@
     def __iadd__(self, other):
         """ See __add__. """
         oth = sanitize_units_add(self, other, "addition")
-        return np.add(self, oth, out=self)
+        np.add(self, oth, out=self)
+        return self
 
     def __sub__(self, right_object):
         """
@@ -691,7 +699,8 @@
     def __isub__(self, other):
         """ See __sub__. """
         oth = sanitize_units_add(self, other, "subtraction")
-        return np.subtract(self, oth, out=self)
+        np.subtract(self, oth, out=self)
+        return self
 
     def __neg__(self):
         """ Negate the data. """
@@ -718,7 +727,8 @@
     def __imul__(self, other):
         """ See __mul__. """
         oth = sanitize_units_mul(self, other)
-        return np.multiply(self, oth, out=self)
+        np.multiply(self, oth, out=self)
+        return self
 
     def __div__(self, right_object):
         """
@@ -736,7 +746,8 @@
     def __idiv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.divide(self, oth, out=self)
+        np.divide(self, oth, out=self)
+        return self
 
     def __truediv__(self, right_object):
         ro = sanitize_units_mul(self, right_object)
@@ -750,7 +761,8 @@
     def __itruediv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.true_divide(self, oth, out=self)
+        np.true_divide(self, oth, out=self)
+        return self
 
     def __floordiv__(self, right_object):
         ro = sanitize_units_mul(self, right_object)
@@ -764,7 +776,8 @@
     def __ifloordiv__(self, other):
         """ See __div__. """
         oth = sanitize_units_mul(self, other)
-        return np.floor_divide(self, oth, out=self)
+        np.floor_divide(self, oth, out=self)
+        return self
 
     #Should these raise errors?  I need to come back and check this.
     def __or__(self, right_object):
@@ -774,7 +787,8 @@
         return YTArray(super(YTArray, self).__ror__(left_object))
 
     def __ior__(self, other):
-        return np.bitwise_or(self, other, out=self)
+        np.bitwise_or(self, other, out=self)
+        return self
 
     def __xor__(self, right_object):
         return YTArray(super(YTArray, self).__xor__(right_object))
@@ -783,7 +797,8 @@
         return YTArray(super(YTArray, self).__rxor__(left_object))
 
     def __ixor__(self, other):
-        return np.bitwise_xor(self, other, out=self)
+        np.bitwise_xor(self, other, out=self)
+        return self
 
     def __and__(self, right_object):
         return YTArray(super(YTArray, self).__and__(right_object))
@@ -792,7 +807,8 @@
         return YTArray(super(YTArray, self).__rand__(left_object))
 
     def __iand__(self, other):
-        return np.bitwise_and(self, other, out=self)
+        np.bitwise_and(self, other, out=self)
+        return self
 
     def __pow__(self, power):
         """

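The pattern change in the in-place operators -- call the ufunc with
out=self, then explicitly return self -- guarantees that augmented
assignment hands back the very same object rather than whatever wrapper
the ufunc returns, so existing views keep aliasing the same buffer. That
is exactly what the new ndarray_view assertions in the tests exercise;
a condensed version:

    from yt.units.yt_array import YTQuantity

    km = YTQuantity(1, 'km')
    view = km.ndarray_view()     # a plain ndarray view of km's data
    km.convert_to_units('cm')    # rescales the data in place
    assert view.base is km.base  # the view still sees the same memory
    assert float(km) == 1e5
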
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -6,7 +6,7 @@
 mass_hydrogen_cgs = 1.007947*amu_cgs
 
 # Velocities
-speed_of_light_cgs = YTQuantity(2.99792458e10, 'cm/s')
+speed_of_light_cgs = YTQuantity(speed_of_light_cm_per_s, 'cm/s')
 
 # Cross Sections
 # 8*pi/3 (alpha*hbar*c/(2*pi))**2
@@ -44,7 +44,9 @@
 qp = charge_proton_cgs
 mh = mp
 clight = speed_of_light_cgs
+speed_of_light = speed_of_light_cgs
 kboltz = boltzmann_constant_cgs
+kb = kboltz
 hcgs = planck_constant_cgs
 sigma_thompson = cross_section_thompson_cgs
 Na = 1 / amu_cgs

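The new kb and speed_of_light aliases simply re-export existing constants
under the names most scripts reach for first. A quick use, assuming the
constants are YTQuantities as defined above:

    from yt.units.yt_array import YTQuantity
    from yt.utilities.physical_constants import kb

    # kB * T gives a thermal energy scale directly in erg:
    print(kb * YTQuantity(300, 'K'))
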
diff -r dd9c415ca94dcf54849b3760a290a783f5cd44e4 -r b58796ca19881fca5b1244e13152816ad2164200 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -67,6 +67,9 @@
 sec_per_min  = 60.0
 day_per_year = 365.25
 
+# velocities
+speed_of_light_cm_per_s = 2.99792458e10
+
 # temperature / energy
 boltzmann_constant_erg_per_K = 1.3806488e-16
 erg_per_eV = 1.602176562e-12
@@ -75,6 +78,7 @@
 keV_per_K = 1.0 / K_per_keV
 keV_per_erg = 1.0 / erg_per_keV
 eV_per_erg = 1.0 / erg_per_eV
+kelvin_per_rankine = 5./9.
 
 # Solar System masses
 # Standish, E.M. (1995) "Report of the IAU WGAS Sub-Group on Numerical Standards",

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving it
because you have the service enabled and are the addressed recipient of
this email.
