[yt-svn] commit/yt: 52 new changesets

commits-noreply at bitbucket.org
Mon Nov 3 08:53:21 PST 2014


52 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/842ecf5b41f4/
Changeset:   842ecf5b41f4
Branch:      yt
User:        jzuhone
Date:        2014-09-29 02:49:31+00:00
Summary:     Planck units
Affected #:  3 files

diff -r 383cb1f9d7336488520cbffe2551e7153cd36b28 -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -20,7 +20,8 @@
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
     cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
     boltzmann_constant_erg_per_K, kelvin_per_rankine, \
-    speed_of_light_cm_per_s
+    speed_of_light_cm_per_s, planck_length, planck_charge, \
+    planck_energy, planck_mass, planck_temperature, planck_time
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -114,6 +115,14 @@
     "d": (1.0, dimensions.time),
     "Angstrom": (cm_per_ang, dimensions.length),
 
+    # Planck units
+    "m_pl": (planck_mass, dimensions.mass),
+    "l_pl": (planck_length, dimensions.length),
+    "t_pl": (planck_time, dimensions.time),
+    "T_pl": (planck_temperature, dimensions.temperature),
+    "q_pl": (planck_charge, dimensions.charge),
+    "E_pl": (planck_energy, dimensions.energy),
+
 }
 
 # Add LaTeX representations for units with trivial representations.

diff -r 383cb1f9d7336488520cbffe2551e7153cd36b28 -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,5 +1,6 @@
 from yt.utilities.physical_ratios import *
 from yt.units.yt_array import YTQuantity
+from math import pi
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
 amu_cgs           = YTQuantity(amu_grams, 'g')
@@ -48,5 +49,14 @@
 kboltz = boltzmann_constant_cgs
 kb = kboltz
 hcgs = planck_constant_cgs
+hbar = 0.5*hcgs/pi
 sigma_thompson = cross_section_thompson_cgs
 Na = 1 / amu_cgs
+
+#Planck units
+m_pl = YTQuantity(planck_mass, "g")
+l_pl = YTQuantity(planck_length, "cm")
+t_pl = YTQuantity(planck_time, "s")
+E_pl = YTQuantity(planck_energy, "erg")
+q_pl = YTQuantity(planck_charge, "esu")
+T_pl = YTQuantity(planck_temperature, "K")

diff -r 383cb1f9d7336488520cbffe2551e7153cd36b28 -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -109,3 +109,11 @@
 # Miscellaneous
 HUGE = 1.0e90
 TINY = 1.0e-40
+
+# Planck units
+planck_mass = 2.17650925245e-05
+planck_length = 1.6161992557e-33
+planck_time = planck_length / speed_of_light_cm_per_s
+planck_energy = planck_mass * speed_of_light_cm_per_s * speed_of_light_cm_per_s
+planck_temperature = planck_energy / boltzmann_constant_erg_per_K
+planck_charge = 5.62274532302e-09
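
For reference, a minimal sketch of how the new Planck unit symbols could be exercised, assuming the YTQuantity interface imported in the diff above (illustrative only, not part of the changeset):

    from yt.units.yt_array import YTQuantity

    # One Planck mass, expressed in grams via the new "m_pl" symbol;
    # the value should match planck_mass in physical_ratios.py.
    m = YTQuantity(1.0, "m_pl")
    print(m.in_units("g"))        # ~2.1765e-05 g

    # Planck time is defined above as planck_length / c.
    t = YTQuantity(1.0, "t_pl")
    print(t.in_units("s"))        # ~5.39e-44 s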


https://bitbucket.org/yt_analysis/yt/commits/19db3af8e816/
Changeset:   19db3af8e816
Branch:      yt
User:        jzuhone
Date:        2014-09-29 14:32:05+00:00
Summary:     First introduction of SI charge units, fixing Neptune's name
Affected #:  4 files

diff -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -44,16 +44,25 @@
 power    = energy / time
 flux     = power / area
 specific_flux = flux / rate
+
+# Gaussian electromagnetic units
 charge   = (energy * length)**Rational(1, 2)  # proper 1/2 power
-
+current  = charge / time
 electric_field = charge / length**2
 magnetic_field = electric_field
 
+# SI electromagnetic units
+current_si = Symbol("(current_si)", positive=True)
+charge_si = current_si * time
+electric_field_si = force / charge_si
+magnetic_field_si = electric_field_si / velocity
+
 solid_angle = angle * angle
 
 derived_dimensions = [rate, velocity, acceleration, jerk, snap, crackle, pop, 
                       momentum, force, energy, power, charge, electric_field, 
                       magnetic_field, solid_angle, flux, specific_flux, volume,
-                      area]
+                      area, current, current_si, charge_si, electric_field_si,
+                      magnetic_field_si]
 
 dimensions = base_dimensions + derived_dimensions

diff -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -40,13 +40,17 @@
     "erg":  (1.0, dimensions.energy),
     "esu":  (1.0, dimensions.charge),
     "gauss": (1.0, dimensions.magnetic_field),
-    "C" : (1.0, dimensions.temperature, -273.15),
+    "C": (1.0, dimensions.temperature, -273.15),
 
     # some SI
     "m": (1.0e2, dimensions.length),
     "J": (1.0e7, dimensions.energy),
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
+    "N": (1.0e5, dimensions.force),
+    "Coulomb": (1.0, dimensions.charge_si),
+    "A": (1.0, dimensions.current_si),
+    "T": (1.0, dimensions.magnetic_field_si),
 
     # Imperial units
     "ft": (30.48, dimensions.length),

diff -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -36,7 +36,7 @@
 mass_mars_cgs = YTQuantity(mass_mars_grams, 'g')
 mass_saturn_cgs = YTQuantity(mass_saturn_grams, 'g')
 mass_uranus_cgs = YTQuantity(mass_uranus_grams, 'g')
-mass_neptun_cgs = YTQuantity(mass_neptun_grams, 'g')
+mass_neptune_cgs = YTQuantity(mass_neptune_grams, 'g')
 
 #Short cuts
 G = gravitational_constant_cgs
@@ -60,3 +60,6 @@
 E_pl = YTQuantity(planck_energy, "erg")
 q_pl = YTQuantity(planck_charge, "esu")
 T_pl = YTQuantity(planck_temperature, "K")
+
+mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
+eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("Coulomb**2/N/m**2")

diff -r 842ecf5b41f4bf79c7493c966c41741e3ee5de9d -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -92,7 +92,7 @@
 mass_mars_grams = mass_sun_grams / 3098708.0
 mass_saturn_grams = mass_sun_grams / 3497.898
 mass_uranus_grams = mass_sun_grams / 22902.98
-mass_neptun_grams = mass_sun_grams / 19412.24
+mass_neptune_grams = mass_sun_grams / 19412.24
 
 # flux
 jansky_cgs = 1.0e-23
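
As a quick consistency sketch (not part of the commit), the definition of eps_0 above implies that mu_0 * eps_0 * c**2 reduces to a dimensionless value of one, assuming the SI charge/current dimensions cancel as intended at this point in the series:

    from yt.utilities.physical_constants import mu_0, eps_0, clight

    # eps_0 is defined above as 1 / (clight.in_mks()**2 * mu_0), so this
    # product should come out dimensionless and ~1.
    print(mu_0 * eps_0 * clight.in_mks()**2)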


https://bitbucket.org/yt_analysis/yt/commits/5972f3619283/
Changeset:   5972f3619283
Branch:      yt
User:        jzuhone
Date:        2014-09-29 16:49:59+00:00
Summary:     More stuff on SI conversions. Not quite sure how to handle this just yet.
Affected #:  2 files

diff -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 -r 5972f361928375f2f4b42d3e7e0c546730665ab2 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -48,9 +48,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "Coulomb": (1.0, dimensions.charge_si),
-    "A": (1.0, dimensions.current_si),
-    "T": (1.0, dimensions.magnetic_field_si),
+    "Coulomb": (2997924580.0, dimensions.charge_si),
+    "A": (2997924580.0, dimensions.current_si),
+    "T": (1.0e4, dimensions.magnetic_field_si),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
@@ -208,3 +208,7 @@
     dimensions.temperature:'K',
     dimensions.angle:'radian',
 }
+
+si_equivalencies = {"Coulomb":"esu",
+                    "T":"gauss",
+                    "A":"esu/s"}

diff -r 19db3af8e816cfc72e655dd7649bd1220a553cd9 -r 5972f361928375f2f4b42d3e7e0c546730665ab2 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -29,6 +29,7 @@
     floor, ceil, trunc, fmax, fmin
 
 from yt.units.unit_object import Unit, UnitParseError
+from yt.units.unit_lookup_table import si_equivalencies
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \


https://bitbucket.org/yt_analysis/yt/commits/3fd61cf58694/
Changeset:   3fd61cf58694
Branch:      yt
User:        jzuhone
Date:        2014-09-29 17:36:32+00:00
Summary:     Renaming F and C to degF and degC, so we can have Coulombs as C
Affected #:  2 files

diff -r 5972f361928375f2f4b42d3e7e0c546730665ab2 -r 3fd61cf58694c1beef092cbc7e5809843c2d6ee3 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -469,21 +469,21 @@
 
     km = YTQuantity(1, 'km')
     balmy = YTQuantity(300, 'K')
-    balmy_F = YTQuantity(80.33, 'F')
-    balmy_C = YTQuantity(26.85, 'C')
+    balmy_F = YTQuantity(80.33, 'degF')
+    balmy_C = YTQuantity(26.85, 'degC')
     balmy_R = YTQuantity(540, 'R')
 
-    assert_array_almost_equal(balmy.in_units('F'), balmy_F)
-    assert_array_almost_equal(balmy.in_units('C'), balmy_C)
+    assert_array_almost_equal(balmy.in_units('degF'), balmy_F)
+    assert_array_almost_equal(balmy.in_units('degC'), balmy_C)
     assert_array_almost_equal(balmy.in_units('R'), balmy_R)
 
     balmy_view = balmy.ndarray_view()
 
-    balmy.convert_to_units('F')
+    balmy.convert_to_units('degF')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
 
-    balmy.convert_to_units('C')
+    balmy.convert_to_units('degC')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)
 
@@ -497,7 +497,7 @@
 
     yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km
 
-    # Does CGS convergion from F to K work?
+    # Does CGS conversion from F to K work?
     yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
 
 

diff -r 5972f361928375f2f4b42d3e7e0c546730665ab2 -r 3fd61cf58694c1beef092cbc7e5809843c2d6ee3 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -40,7 +40,7 @@
     "erg":  (1.0, dimensions.energy),
     "esu":  (1.0, dimensions.charge),
     "gauss": (1.0, dimensions.magnetic_field),
-    "C": (1.0, dimensions.temperature, -273.15),
+    "degC": (1.0, dimensions.temperature, -273.15),
 
     # some SI
     "m": (1.0e2, dimensions.length),
@@ -55,7 +55,7 @@
     # Imperial units
     "ft": (30.48, dimensions.length),
     "mile": (160934, dimensions.length),
-    "F": (kelvin_per_rankine, dimensions.temperature, -459.67),
+    "degF": (kelvin_per_rankine, dimensions.temperature, -459.67),
     "R": (kelvin_per_rankine, dimensions.temperature),
 
     # dimensionless stuff
@@ -191,6 +191,10 @@
     "W",
     "gauss",
     "Jy",
+    "N",
+    "T",
+    "A",
+    "C",
 )
 
 cgs_base_units = {
@@ -209,6 +213,6 @@
     dimensions.angle:'radian',
 }
 
-si_equivalencies = {"Coulomb":"esu",
+si_equivalencies = {"C":"esu",
                     "T":"gauss",
                     "A":"esu/s"}


https://bitbucket.org/yt_analysis/yt/commits/c328c2d835b9/
Changeset:   c328c2d835b9
Branch:      yt
User:        jzuhone
Date:        2014-09-29 18:31:23+00:00
Summary:     current_si goes in base_dimensions
Affected #:  1 file

diff -r 3fd61cf58694c1beef092cbc7e5809843c2d6ee3 -r c328c2d835b9c12455ace8f024bbf9edbbff46e4 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -19,9 +19,11 @@
 time = Symbol("(time)", positive=True)
 temperature = Symbol("(temperature)", positive=True)
 angle = Symbol("(angle)", positive=True)
+current_si = Symbol("(current_si)", positive=True)
 dimensionless = sympify(1)
 
-base_dimensions = [mass, length, time, temperature, angle, dimensionless]
+base_dimensions = [mass, length, time, temperature, angle, current_si,
+                   dimensionless]
 
 #
 # Derived dimensions
@@ -52,7 +54,6 @@
 magnetic_field = electric_field
 
 # SI electromagnetic units
-current_si = Symbol("(current_si)", positive=True)
 charge_si = current_si * time
 electric_field_si = force / charge_si
 magnetic_field_si = electric_field_si / velocity


https://bitbucket.org/yt_analysis/yt/commits/7c56ba25118a/
Changeset:   7c56ba25118a
Branch:      yt
User:        jzuhone
Date:        2014-09-29 18:31:36+00:00
Summary:     Changing unit name here
Affected #:  1 file

diff -r c328c2d835b9c12455ace8f024bbf9edbbff46e4 -r 7c56ba25118adfe033e40086b7ab2262fa8c174d yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -62,4 +62,4 @@
 T_pl = YTQuantity(planck_temperature, "K")
 
 mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
-eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("Coulomb**2/N/m**2")
+eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("C**2/N/m**2")


https://bitbucket.org/yt_analysis/yt/commits/3e692258d436/
Changeset:   3e692258d436
Branch:      yt
User:        jzuhone
Date:        2014-09-29 18:32:35+00:00
Summary:     Setting up equivalences between units to allow for conversions and operations between EM SI and Gaussian. This is not working perfectly yet.
Affected #:  3 files

diff -r 7c56ba25118adfe033e40086b7ab2262fa8c174d -r 3e692258d436d7714fb5e3c4bc5e19a06e920875 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -41,6 +41,7 @@
     "esu":  (1.0, dimensions.charge),
     "gauss": (1.0, dimensions.magnetic_field),
     "degC": (1.0, dimensions.temperature, -273.15),
+    "statA": (1.0, dimensions.current),
 
     # some SI
     "m": (1.0e2, dimensions.length),
@@ -48,9 +49,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "Coulomb": (2997924580.0, dimensions.charge_si),
-    "A": (2997924580.0, dimensions.current_si),
-    "T": (1.0e4, dimensions.magnetic_field_si),
+    "C": (2997924580.0, dimensions.charge_si, 0.0, ["esu"]),
+    "A": (2997924580.0, dimensions.current_si, 0.0, ["statA"]),
+    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, ["gauss"]),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
@@ -211,8 +212,4 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
-}
-
-si_equivalencies = {"C":"esu",
-                    "T":"gauss",
-                    "A":"esu/s"}
+}
\ No newline at end of file

diff -r 7c56ba25118adfe033e40086b7ab2262fa8c174d -r 3e692258d436d7714fb5e3c4bc5e19a06e920875 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,10 +116,10 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry"]
+                 "registry","equivalences"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
-                dimensions=None, registry=None, **assumptions):
+                equivalences=[], dimensions=None, registry=None, **assumptions):
         """
         Create a new unit. May be an atomic unit (like a gram) or combinations
         of atomic units (like g / cm**3).
@@ -191,12 +191,13 @@
             validate_dimensions(dimensions)
         else:
             # lookup the unit symbols
-            try:
-                cgs_value, dimensions = \
-                    _get_unit_data_from_expr(unit_expr, registry.lut)
-            except ValueError:
-                cgs_value, dimensions, cgs_offset = \
-                    _get_unit_data_from_expr(unit_expr, registry.lut)
+            unit_data = _get_unit_data_from_expr(unit_expr, registry.lut)
+            cgs_value = unit_data[0]
+            dimensions = unit_data[1]
+            if len(unit_data) >= 3:
+                cgs_offset = unit_data[2]
+            if len(unit_data) == 4:
+                equivalences = unit_data[3]
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -208,6 +209,7 @@
         obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
+        obj.equivalences = equivalences
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -340,6 +342,8 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
+        if str(other_unit) in self.equivalences or str(self) in other_unit.equivalences:
+            return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 
     @property

diff -r 7c56ba25118adfe033e40086b7ab2262fa8c174d -r 3e692258d436d7714fb5e3c4bc5e19a06e920875 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -29,7 +29,6 @@
     floor, ceil, trunc, fmax, fmin
 
 from yt.units.unit_object import Unit, UnitParseError
-from yt.units.unit_lookup_table import si_equivalencies
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \
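
The intent of the equivalence hook added to same_dimensions_as above can be sketched as follows (illustrative only; the commit message itself notes this is not yet working perfectly, and it assumes Unit accepts unit strings as it does elsewhere in yt.units):

    from yt.units.unit_object import Unit

    # The "T" (tesla) entry now lists "gauss" as an equivalent unit, so
    # same_dimensions_as should accept gauss even though the underlying
    # sympy dimensions (magnetic_field_si vs. magnetic_field) differ.
    tesla = Unit("T")
    gauss = Unit("gauss")
    print(tesla.same_dimensions_as(gauss))   # expected True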


https://bitbucket.org/yt_analysis/yt/commits/23b0b772f0e7/
Changeset:   23b0b772f0e7
Branch:      yt
User:        jzuhone
Date:        2014-09-29 19:25:08+00:00
Summary:     Only one equivalence per unit
Affected #:  2 files

diff -r 3e692258d436d7714fb5e3c4bc5e19a06e920875 -r 23b0b772f0e70758f6388f4cc2e18197a15a1fea yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -49,9 +49,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "C": (2997924580.0, dimensions.charge_si, 0.0, ["esu"]),
-    "A": (2997924580.0, dimensions.current_si, 0.0, ["statA"]),
-    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, ["gauss"]),
+    "C": (2997924580.0, dimensions.charge_si, 0.0, "esu"),
+    "A": (2997924580.0, dimensions.current_si, 0.0, "statA"),
+    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, "gauss"),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
@@ -212,4 +212,5 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
+    dimensions.current_si:'A',
 }
\ No newline at end of file

diff -r 3e692258d436d7714fb5e3c4bc5e19a06e920875 -r 23b0b772f0e70758f6388f4cc2e18197a15a1fea yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,10 +116,10 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry","equivalences"]
+                 "registry","equivalence"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
-                equivalences=[], dimensions=None, registry=None, **assumptions):
+                equivalence=None, dimensions=None, registry=None, **assumptions):
         """
         Create a new unit. May be an atomic unit (like a gram) or combinations
         of atomic units (like g / cm**3).
@@ -197,7 +197,7 @@
             if len(unit_data) >= 3:
                 cgs_offset = unit_data[2]
             if len(unit_data) == 4:
-                equivalences = unit_data[3]
+                equivalence = unit_data[3]
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -209,7 +209,7 @@
         obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
-        obj.equivalences = equivalences
+        obj.equivalence = equivalence
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -342,7 +342,7 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
-        if str(other_unit) in self.equivalences or str(self) in other_unit.equivalences:
+        if str(other_unit) == self.equivalence or str(self) == other_unit.equivalence:
             return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 


https://bitbucket.org/yt_analysis/yt/commits/1e53fa2ad8c0/
Changeset:   1e53fa2ad8c0
Branch:      yt
User:        jzuhone
Date:        2014-09-30 13:40:25+00:00
Summary:     Gradually incorporating equivalences. Still a ways to go.
Affected #:  2 files

diff -r 23b0b772f0e70758f6388f4cc2e18197a15a1fea -r 1e53fa2ad8c0b739debdd82d0b8efa3e56509385 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -49,9 +49,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "C": (2997924580.0, dimensions.charge_si, 0.0, "esu"),
-    "A": (2997924580.0, dimensions.current_si, 0.0, "statA"),
-    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, "gauss"),
+    "C": (2997924580.0, dimensions.charge_si, 0.0, ("esu", dimensions.charge)),
+    "A": (2997924580.0, dimensions.current_si, 0.0, ("statA", dimensions.current)),
+    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, ("gauss", dimensions.magnetic_field)),
 
     # Imperial units
     "ft": (30.48, dimensions.length),

diff -r 23b0b772f0e70758f6388f4cc2e18197a15a1fea -r 1e53fa2ad8c0b739debdd82d0b8efa3e56509385 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -209,6 +209,9 @@
         obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
+        if isinstance(equivalence, tuple):
+            equivalence = Unit(unit_expr=equivalence[0], cgs_value=1.0,
+                               dimensions=equivalence[1], registry=registry)
         obj.equivalence = equivalence
 
         if unit_key:
@@ -266,12 +269,19 @@
                 cgs_offset = self.cgs_offset
             else:
                 raise InvalidUnitOperation("Quantities with units of Fahrenheit "
-                                           "and Celcius cannot be multiplied.")
+                                           "and Celsius cannot be multiplied.")
+
+        equivalence = None
+        if self.equivalence or u.equivalence:
+            a = self.equivalence if self.equivalence else self
+            b = u.equivalence if u.equivalence else u
+            equivalence = a*b
 
         return Unit(self.expr * u.expr,
                     cgs_value=(self.cgs_value * u.cgs_value),
                     cgs_offset=cgs_offset,
                     dimensions=(self.dimensions * u.dimensions),
+                    equivalence=equivalence,
                     registry=self.registry)
 
     def __div__(self, u):
@@ -289,12 +299,19 @@
                 cgs_offset = self.cgs_offset
             else:
                 raise InvalidUnitOperation("Quantities with units of Farhenheit "
-                                           "and Celcius cannot be multiplied.")
+                                           "and Celsius cannot be multiplied.")
+
+        equivalence = None
+        if self.equivalence or u.equivalence:
+            a = self.equivalence if self.equivalence else self
+            b = u.equivalence if u.equivalence else u
+            equivalence = a/b
 
         return Unit(self.expr / u.expr,
                     cgs_value=(self.cgs_value / u.cgs_value),
                     cgs_offset=cgs_offset,
                     dimensions=(self.dimensions / u.dimensions),
+                    equivalence=equivalence,
                     registry=self.registry)
 
     __truediv__ = __div__
@@ -308,8 +325,14 @@
                                        "power '%s' (type %s). Failed to cast " \
                                        "it to a float." % (p, type(p)) )
 
+        equivalence = None
+        if self.equivalence:
+            equivalence = self.equivalence**p
+
         return Unit(self.expr**p, cgs_value=(self.cgs_value**p),
-                    dimensions=(self.dimensions**p), registry=self.registry)
+                    dimensions=(self.dimensions**p),
+                    equivalence=equivalence,
+                    registry=self.registry)
 
     def __eq__(self, u):
         """ Test unit equality. """
@@ -342,7 +365,7 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
-        if str(other_unit) == self.equivalence or str(self) == other_unit.equivalence:
+        if other_unit == self.equivalence or self == other_unit.equivalence:
             return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 
@@ -376,20 +399,30 @@
         Create and return dimensionally-equivalent cgs units.
 
         """
-        units_string = self._get_system_unit_string(cgs_base_units)
+        if self.equivalence is None:
+            units_string = self._get_system_unit_string(cgs_base_units)
+            dims = self.dimensions
+        else:
+            units_string = self.equivalence._get_system_unit_string(cgs_base_units)
+            dims = self.equivalence.dimensions
         return Unit(units_string, cgs_value=1.0,
-                    dimensions=self.dimensions, registry=self.registry)
+                    dimensions=dims, registry=self.registry)
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
 
         """
-        units_string = self._get_system_unit_string(mks_base_units)
+        if self.equivalence is None:
+            units_string = self._get_system_unit_string(mks_base_units)
+            dims = self.dimensions
+        else:
+            units_string = self.equivalence._get_system_unit_string(mks_base_units)
+            dims = self.equivalence.dimensions
         cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
                      get_conversion_factor(self, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
-                    dimensions=self.dimensions, registry=self.registry)
+                    dimensions=dims, registry=self.registry)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)
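
A sketch of what the equivalence-aware get_cgs_equivalent above aims to allow (illustrative; the exact output formatting is not verified here):

    from yt.units.unit_object import Unit

    # With the ("gauss", dimensions.magnetic_field) equivalence attached
    # to "T", get_cgs_equivalent() should now reduce a tesla to the cgs
    # base-unit combination for a magnetic field instead of failing on
    # the mks-only dimensions.
    print(Unit("T").get_cgs_equivalent())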


https://bitbucket.org/yt_analysis/yt/commits/9e165e5e866d/
Changeset:   9e165e5e866d
Branch:      yt
User:        jzuhone
Date:        2014-09-30 13:44:23+00:00
Summary:     This fixes mks, but it's not "mksa". Not sure what to do about that.
Affected #:  1 file

diff -r 1e53fa2ad8c0b739debdd82d0b8efa3e56509385 -r 9e165e5e866d8c51ffb6f8a6bbef42df878ce4e4 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -416,11 +416,13 @@
         if self.equivalence is None:
             units_string = self._get_system_unit_string(mks_base_units)
             dims = self.dimensions
+            units = self
         else:
             units_string = self.equivalence._get_system_unit_string(mks_base_units)
             dims = self.equivalence.dimensions
-        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
-                     get_conversion_factor(self, Unit(units_string))[0])
+            units = self.equivalence
+        cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
+                     get_conversion_factor(units, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
                     dimensions=dims, registry=self.registry)
 


https://bitbucket.org/yt_analysis/yt/commits/9ab8bcc03cdb/
Changeset:   9ab8bcc03cdb
Branch:      yt
User:        jzuhone
Date:        2014-09-30 14:31:24+00:00
Summary:     Cleaning up the unit lookup table a bit, adding the "beta" unit which has a velocity equivalence.
Affected #:  1 file

diff -r 9e165e5e866d8c51ffb6f8a6bbef42df878ce4e4 -r 9ab8bcc03cdb822bb8e2be4df736098b0fc19e6b yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -49,8 +49,8 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "C": (2997924580.0, dimensions.charge_si, 0.0, ("esu", dimensions.charge)),
-    "A": (2997924580.0, dimensions.current_si, 0.0, ("statA", dimensions.current)),
+    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_si, 0.0, ("esu", dimensions.charge)),
+    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_si, 0.0, ("statA", dimensions.current)),
     "T": (1.0e4, dimensions.magnetic_field_si, 0.0, ("gauss", dimensions.magnetic_field)),
 
     # Imperial units
@@ -71,6 +71,7 @@
 
     # Velocities
     "c": (speed_of_light_cm_per_s, dimensions.velocity),
+    "beta": (speed_of_light_cm_per_s, dimensions.dimensionless, 0.0, ("cm/s", dimensions.velocity)),
 
     # Solar units
     "Msun": (mass_sun_grams, dimensions.mass),
@@ -212,5 +213,4 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
-    dimensions.current_si:'A',
 }
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/987bd186c242/
Changeset:   987bd186c242
Branch:      yt
User:        jzuhone
Date:        2014-09-30 15:50:35+00:00
Summary:     Changed the way equivalences are handled, where it is determined when creating a new unit object. Equivalences are now part of a separate table. Most operations seem to work now.
Affected #:  2 files

diff -r 9ab8bcc03cdb822bb8e2be4df736098b0fc19e6b -r 987bd186c242a23b74864e45dd25fbc1c8a091f7 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -49,9 +49,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_si, 0.0, ("esu", dimensions.charge)),
-    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_si, 0.0, ("statA", dimensions.current)),
-    "T": (1.0e4, dimensions.magnetic_field_si, 0.0, ("gauss", dimensions.magnetic_field)),
+    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_si),
+    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_si),
+    "T": (1.0e4, dimensions.magnetic_field_si),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
@@ -71,7 +71,7 @@
 
     # Velocities
     "c": (speed_of_light_cm_per_s, dimensions.velocity),
-    "beta": (speed_of_light_cm_per_s, dimensions.dimensionless, 0.0, ("cm/s", dimensions.velocity)),
+    "beta": (speed_of_light_cm_per_s, dimensions.dimensionless),
 
     # Solar units
     "Msun": (mass_sun_grams, dimensions.mass),
@@ -213,4 +213,11 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
+}
+
+unit_equivalences = {
+    "C":("esu", dimensions.charge),
+    "T":("gauss", dimensions.magnetic_field),
+    "A":("statA", dimensions.current),
+    "beta":("c", dimensions.velocity),
 }
\ No newline at end of file

diff -r 9ab8bcc03cdb822bb8e2be4df736098b0fc19e6b -r 987bd186c242a23b74864e45dd25fbc1c8a091f7 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -17,7 +17,7 @@
     Pow, Symbol, Integer, \
     Float, Basic, Rational, sqrt
 from sympy.core.numbers import One
-from sympy import sympify, latex
+from sympy import sympify, latex, symbols
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
@@ -26,7 +26,7 @@
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
-    mks_base_units
+    mks_base_units, unit_equivalences
 from yt.units.unit_registry import UnitRegistry
 
 import copy
@@ -119,7 +119,7 @@
                  "registry","equivalence"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
-                equivalence=None, dimensions=None, registry=None, **assumptions):
+                dimensions=None, registry=None, **assumptions):
         """
         Create a new unit. May be an atomic unit (like a gram) or combinations
         of atomic units (like g / cm**3).
@@ -194,10 +194,9 @@
             unit_data = _get_unit_data_from_expr(unit_expr, registry.lut)
             cgs_value = unit_data[0]
             dimensions = unit_data[1]
-            if len(unit_data) >= 3:
+            if len(unit_data) == 3:
                 cgs_offset = unit_data[2]
-            if len(unit_data) == 4:
-                equivalence = unit_data[3]
+            equivalence = cgs_equivalences.get(str(unit_expr),None)
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -209,9 +208,18 @@
         obj.cgs_offset = cgs_offset
         obj.dimensions = dimensions
         obj.registry = registry
-        if isinstance(equivalence, tuple):
-            equivalence = Unit(unit_expr=equivalence[0], cgs_value=1.0,
-                               dimensions=equivalence[1], registry=registry)
+
+        equivalences = []
+        for atom in unit_expr.atoms():
+            if str(atom) in unit_equivalences:
+                equiv, dims = unit_equivalences[str(atom)]
+                equivalences.append((atom, symbols(equiv)))
+        if len(equivalences) > 0:
+            equivalence = unit_expr.subs(equivalences)
+            equivalence = Unit(unit_expr=equivalence, cgs_value=1.0,
+                               dimensions=None, registry=registry)
+        else:
+            equivalence = None
         obj.equivalence = equivalence
 
         if unit_key:
@@ -271,17 +279,10 @@
                 raise InvalidUnitOperation("Quantities with units of Fahrenheit "
                                            "and Celsius cannot be multiplied.")
 
-        equivalence = None
-        if self.equivalence or u.equivalence:
-            a = self.equivalence if self.equivalence else self
-            b = u.equivalence if u.equivalence else u
-            equivalence = a*b
-
         return Unit(self.expr * u.expr,
                     cgs_value=(self.cgs_value * u.cgs_value),
                     cgs_offset=cgs_offset,
                     dimensions=(self.dimensions * u.dimensions),
-                    equivalence=equivalence,
                     registry=self.registry)
 
     def __div__(self, u):
@@ -301,17 +302,10 @@
                 raise InvalidUnitOperation("Quantities with units of Farhenheit "
                                            "and Celsius cannot be multiplied.")
 
-        equivalence = None
-        if self.equivalence or u.equivalence:
-            a = self.equivalence if self.equivalence else self
-            b = u.equivalence if u.equivalence else u
-            equivalence = a/b
-
         return Unit(self.expr / u.expr,
                     cgs_value=(self.cgs_value / u.cgs_value),
                     cgs_offset=cgs_offset,
                     dimensions=(self.dimensions / u.dimensions),
-                    equivalence=equivalence,
                     registry=self.registry)
 
     __truediv__ = __div__
@@ -325,13 +319,8 @@
                                        "power '%s' (type %s). Failed to cast " \
                                        "it to a float." % (p, type(p)) )
 
-        equivalence = None
-        if self.equivalence:
-            equivalence = self.equivalence**p
-
         return Unit(self.expr**p, cgs_value=(self.cgs_value**p),
                     dimensions=(self.dimensions**p),
-                    equivalence=equivalence,
                     registry=self.registry)
 
     def __eq__(self, u):
@@ -365,7 +354,13 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
-        if other_unit == self.equivalence or self == other_unit.equivalence:
+        first_check = False
+        second_check = False
+        if self.equivalence:
+            first_check = self.equivalence.dimensions / other_unit.dimensions == sympy_one
+        if other_unit.equivalence:
+            second_check = other_unit.equivalence.dimensions / self.dimensions == sympy_one
+        if first_check or second_check:
             return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
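
Since equivalences are now built by substituting individual unit atoms from the unit_equivalences table, composite units pick them up automatically. A rough sketch of that intent (illustrative, not from the commit):

    from yt.units.unit_object import Unit

    # Each atom with a table entry is substituted, so "C/m**2" has a
    # Gaussian counterpart of "esu/m**2" built for it at construction.
    sigma = Unit("C/m**2")
    print(sigma.equivalence)                             # expected esu/m**2
    print(sigma.same_dimensions_as(Unit("esu/cm**2")))   # expected True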
 


https://bitbucket.org/yt_analysis/yt/commits/14d40b63423c/
Changeset:   14d40b63423c
Branch:      yt
User:        jzuhone
Date:        2014-09-30 16:25:21+00:00
Summary:     For now, this will only apply to MKS em units.
Affected #:  2 files

diff -r 987bd186c242a23b74864e45dd25fbc1c8a091f7 -r 14d40b63423cea9c1d18be89a9d806fd11c35686 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -213,11 +213,11 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
+    dimensions.current_si:'A',
 }
 
 unit_equivalences = {
     "C":("esu", dimensions.charge),
     "T":("gauss", dimensions.magnetic_field),
     "A":("statA", dimensions.current),
-    "beta":("c", dimensions.velocity),
 }
\ No newline at end of file

diff -r 987bd186c242a23b74864e45dd25fbc1c8a091f7 -r 14d40b63423cea9c1d18be89a9d806fd11c35686 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -196,7 +196,6 @@
             dimensions = unit_data[1]
             if len(unit_data) == 3:
                 cgs_offset = unit_data[2]
-            equivalence = cgs_equivalences.get(str(unit_expr),None)
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -408,18 +407,11 @@
         Create and return dimensionally-equivalent mks units.
 
         """
-        if self.equivalence is None:
-            units_string = self._get_system_unit_string(mks_base_units)
-            dims = self.dimensions
-            units = self
-        else:
-            units_string = self.equivalence._get_system_unit_string(mks_base_units)
-            dims = self.equivalence.dimensions
-            units = self.equivalence
-        cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
-                     get_conversion_factor(units, Unit(units_string))[0])
+        units_string = self._get_system_unit_string(mks_base_units)
+        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
+                     get_conversion_factor(self, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
-                    dimensions=dims, registry=self.registry)
+                    dimensions=self.dimensions, registry=self.registry)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)


https://bitbucket.org/yt_analysis/yt/commits/abc3dc70e3fb/
Changeset:   abc3dc70e3fb
Branch:      yt
User:        jzuhone
Date:        2014-09-30 17:32:37+00:00
Summary:     Better handling of mks conversions. Bring back the beta equivalence. Make electromagnetic dimensions distinctly cgs or mks.
Affected #:  3 files

diff -r 14d40b63423cea9c1d18be89a9d806fd11c35686 -r abc3dc70e3fb5f4e3290cf1ae9fc829d1359f125 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -19,10 +19,10 @@
 time = Symbol("(time)", positive=True)
 temperature = Symbol("(temperature)", positive=True)
 angle = Symbol("(angle)", positive=True)
-current_si = Symbol("(current_si)", positive=True)
+current_mks = Symbol("(current_mks)", positive=True)
 dimensionless = sympify(1)
 
-base_dimensions = [mass, length, time, temperature, angle, current_si,
+base_dimensions = [mass, length, time, temperature, angle, current_mks,
                    dimensionless]
 
 #
@@ -48,22 +48,22 @@
 specific_flux = flux / rate
 
 # Gaussian electromagnetic units
-charge   = (energy * length)**Rational(1, 2)  # proper 1/2 power
-current  = charge / time
-electric_field = charge / length**2
-magnetic_field = electric_field
+charge_cgs  = (energy * length)**Rational(1, 2)  # proper 1/2 power
+current_cgs = charge_cgs / time
+electric_field_cgs = charge_cgs / length**2
+magnetic_field_cgs = electric_field_cgs
 
 # SI electromagnetic units
-charge_si = current_si * time
-electric_field_si = force / charge_si
-magnetic_field_si = electric_field_si / velocity
+charge_mks = current_mks * time
+electric_field_mks = force / charge_mks
+magnetic_field_mks = electric_field_mks / velocity
 
 solid_angle = angle * angle
 
 derived_dimensions = [rate, velocity, acceleration, jerk, snap, crackle, pop, 
-                      momentum, force, energy, power, charge, electric_field, 
-                      magnetic_field, solid_angle, flux, specific_flux, volume,
-                      area, current, current_si, charge_si, electric_field_si,
-                      magnetic_field_si]
+                      momentum, force, energy, power, charge_cgs, electric_field_cgs,
+                      magnetic_field_cgs, solid_angle, flux, specific_flux, volume,
+                      area, current_cgs, charge_mks, electric_field_mks,
+                      magnetic_field_mks]
 
 dimensions = base_dimensions + derived_dimensions

diff -r 14d40b63423cea9c1d18be89a9d806fd11c35686 -r abc3dc70e3fb5f4e3290cf1ae9fc829d1359f125 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -38,10 +38,10 @@
     # other cgs
     "dyne": (1.0, dimensions.force),
     "erg":  (1.0, dimensions.energy),
-    "esu":  (1.0, dimensions.charge),
-    "gauss": (1.0, dimensions.magnetic_field),
+    "esu":  (1.0, dimensions.charge_cgs),
+    "gauss": (1.0, dimensions.magnetic_field_cgs),
     "degC": (1.0, dimensions.temperature, -273.15),
-    "statA": (1.0, dimensions.current),
+    "statA": (1.0, dimensions.current_cgs),
 
     # some SI
     "m": (1.0e2, dimensions.length),
@@ -49,9 +49,9 @@
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
     "N": (1.0e5, dimensions.force),
-    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_si),
-    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_si),
-    "T": (1.0e4, dimensions.magnetic_field_si),
+    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_mks),
+    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_mks),
+    "T": (1.0e4, dimensions.magnetic_field_mks),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
@@ -116,8 +116,8 @@
     "sr": (1.0, dimensions.solid_angle),
     "rad": (1.0, dimensions.solid_angle),
     "deg": (np.pi/180., dimensions.angle),
-    "Fr":  (1.0, dimensions.charge),
-    "G": (1.0, dimensions.magnetic_field),
+    "Fr":  (1.0, dimensions.charge_cgs),
+    "G": (1.0, dimensions.magnetic_field_cgs),
     "d": (1.0, dimensions.time),
     "Angstrom": (cm_per_ang, dimensions.length),
 
@@ -126,7 +126,7 @@
     "l_pl": (planck_length, dimensions.length),
     "t_pl": (planck_time, dimensions.time),
     "T_pl": (planck_temperature, dimensions.temperature),
-    "q_pl": (planck_charge, dimensions.charge),
+    "q_pl": (planck_charge, dimensions.charge_cgs),
     "E_pl": (planck_energy, dimensions.energy),
 
 }
@@ -213,11 +213,12 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
-    dimensions.current_si:'A',
+    dimensions.current_mks:'A',
 }
 
 unit_equivalences = {
-    "C":("esu", dimensions.charge),
-    "T":("gauss", dimensions.magnetic_field),
-    "A":("statA", dimensions.current),
+    "C":("mks","esu"),
+    "T":("mks","gauss"),
+    "A":("mks","statA"),
+    "beta":(None,"c"),
 }
\ No newline at end of file

diff -r 14d40b63423cea9c1d18be89a9d806fd11c35686 -r abc3dc70e3fb5f4e3290cf1ae9fc829d1359f125 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry","equivalence"]
+                 "registry","equivalence","is_equiv_mks"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -209,9 +209,11 @@
         obj.registry = registry
 
         equivalences = []
+        is_mks = []
         for atom in unit_expr.atoms():
             if str(atom) in unit_equivalences:
-                equiv, dims = unit_equivalences[str(atom)]
+                unit_sys, equiv = unit_equivalences[str(atom)]
+                is_mks.append(unit_sys == "mks")
                 equivalences.append((atom, symbols(equiv)))
         if len(equivalences) > 0:
             equivalence = unit_expr.subs(equivalences)
@@ -220,6 +222,7 @@
         else:
             equivalence = None
         obj.equivalence = equivalence
+        obj.is_equiv_mks = all(is_mks)
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -393,25 +396,22 @@
         Create and return dimensionally-equivalent cgs units.
 
         """
-        if self.equivalence is None:
-            units_string = self._get_system_unit_string(cgs_base_units)
-            dims = self.dimensions
-        else:
-            units_string = self.equivalence._get_system_unit_string(cgs_base_units)
-            dims = self.equivalence.dimensions
+        units = self.equivalence if self.equivalence else self
+        units_string = units._get_system_unit_string(cgs_base_units)
         return Unit(units_string, cgs_value=1.0,
-                    dimensions=dims, registry=self.registry)
+                    dimensions=units.dimensions, registry=self.registry)
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
 
         """
-        units_string = self._get_system_unit_string(mks_base_units)
-        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
-                     get_conversion_factor(self, Unit(units_string))[0])
+        units = self.equivalence if self.equivalence and not self.is_equiv_mks else self
+        units_string = units._get_system_unit_string(mks_base_units)
+        cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
+                     get_conversion_factor(units, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
-                    dimensions=self.dimensions, registry=self.registry)
+                    dimensions=units.dimensions, registry=self.registry)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)
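
For orientation, the split electromagnetic dimensions can be inspected directly; a small sketch, assuming the dimensions module import path used elsewhere in yt.units:

    from yt.units import dimensions

    # Gaussian charge is sqrt(energy * length); SI charge is current * time.
    print(dimensions.charge_cgs)
    print(dimensions.charge_mks)
    # A later changeset in this series (db5749e9fd68) restores charge,
    # electric_field, and magnetic_field as cgs-flavored aliases.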


https://bitbucket.org/yt_analysis/yt/commits/8aae4c8b973b/
Changeset:   8aae4c8b973b
Branch:      yt
User:        jzuhone
Date:        2014-09-30 18:24:13+00:00
Summary:     Adding mazzotta_weighting field
Affected #:  1 file

diff -r abc3dc70e3fb5f4e3290cf1ae9fc829d1359f125 -r 8aae4c8b973b2b032714405f92641db9f555a239 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -102,6 +102,13 @@
                        function=_xray_emissivity,
                        units="") # add correct units here
 
+    def _mazzotta_weighting(field, data):
+        data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
+
+    registry.add_field((ftype,"mazzotta_weighting"),
+                       function=_mazzotta_weighting,
+                       units="keV**-0.25*cm**-6")
+    
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")


https://bitbucket.org/yt_analysis/yt/commits/db5749e9fd68/
Changeset:   db5749e9fd68
Branch:      yt
User:        jzuhone
Date:        2014-09-30 18:32:02+00:00
Summary:     Add these for backwards-compatibility
Affected #:  1 file

diff -r 8aae4c8b973b2b032714405f92641db9f555a239 -r db5749e9fd680e6936176f00ed9d05fb76438041 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -58,6 +58,11 @@
 electric_field_mks = force / charge_mks
 magnetic_field_mks = electric_field_mks / velocity
 
+# Since cgs is our default, I'm adding these shortcuts for backwards-compatibility
+charge = charge_cgs
+electric_field = electric_field_cgs
+magnetic_field = magnetic_field_cgs
+
 solid_angle = angle * angle
 
 derived_dimensions = [rate, velocity, acceleration, jerk, snap, crackle, pop, 


https://bitbucket.org/yt_analysis/yt/commits/1fadbf96e867/
Changeset:   1fadbf96e867
Branch:      yt
User:        jzuhone
Date:        2014-09-30 18:36:02+00:00
Summary:     Oops
Affected #:  1 file

diff -r db5749e9fd680e6936176f00ed9d05fb76438041 -r 1fadbf96e8674b00607b5187ccebf4aaca6d1805 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -103,7 +103,7 @@
                        units="") # add correct units here
 
     def _mazzotta_weighting(field, data):
-        data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
+        return data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
 
     registry.add_field((ftype,"mazzotta_weighting"),
                        function=_mazzotta_weighting,
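
A hypothetical usage sketch for the new field once the return statement is in place (the dataset path and the "gas" field type are placeholders, not taken from the commit):

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # placeholder dataset
    ad = ds.all_data()
    # The field is rho**2 * kT**-0.25 / mh**2, as defined in the diff above;
    # "gas" is assumed to be the registered field type.
    w = ad["gas", "mazzotta_weighting"]
    print(w.units)   # expected keV**-0.25 * cm**-6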


https://bitbucket.org/yt_analysis/yt/commits/ab979b2846cc/
Changeset:   ab979b2846cc
Branch:      yt
User:        jzuhone
Date:        2014-10-16 21:12:08+00:00
Summary:     Need to find a better way to do this.
Affected #:  1 file

diff -r 1fadbf96e8674b00607b5187ccebf4aaca6d1805 -r ab979b2846cc31710b6487bd301aeb0fa1397656 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -71,7 +71,6 @@
 
     # Velocities
     "c": (speed_of_light_cm_per_s, dimensions.velocity),
-    "beta": (speed_of_light_cm_per_s, dimensions.dimensionless),
 
     # Solar units
     "Msun": (mass_sun_grams, dimensions.mass),
@@ -220,5 +219,4 @@
     "C":("mks","esu"),
     "T":("mks","gauss"),
     "A":("mks","statA"),
-    "beta":(None,"c"),
 }
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/6408072eab62/
Changeset:   6408072eab62
Branch:      yt
User:        jzuhone
Date:        2014-10-16 21:13:23+00:00
Summary:     Allow for unit prefixing.
Affected #:  1 file

diff -r ab979b2846cc31710b6487bd301aeb0fa1397656 -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -211,8 +211,12 @@
         equivalences = []
         is_mks = []
         for atom in unit_expr.atoms():
-            if str(atom) in unit_equivalences:
-                unit_sys, equiv = unit_equivalences[str(atom)]
+            ua = str(atom)
+            possible_prefix = ua[0]
+            if possible_prefix in unit_prefixes:
+                ua = ua[1:]
+            if ua in unit_equivalences:
+                unit_sys, equiv = unit_equivalences[ua]
                 is_mks.append(unit_sys == "mks")
                 equivalences.append((atom, symbols(equiv)))
         if len(equivalences) > 0:


https://bitbucket.org/yt_analysis/yt/commits/75663105d4d8/
Changeset:   75663105d4d8
Branch:      yt
User:        jzuhone
Date:        2014-10-17 06:32:58+00:00
Summary:     Renaming the old equivalences to cg_conversions, adding new equivalencies classes
Affected #:  6 files

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/units/equivalencies.py
--- /dev/null
+++ b/yt/units/equivalencies.py
@@ -0,0 +1,81 @@
+"""
+Base dimensions
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import yt.utilities.physical_constants as pc
+from yt.units.dimensions import temperature, mass, energy, length, rate
+from yt.extern.six import add_metaclass
+
+equivalence_registry = {}
+
+class RegisteredEquivalence(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            equivalence_registry[cls._type_name] = cls
+
+ at add_metaclass(RegisteredEquivalence)
+class Equivalence(object):
+    _skip_add = False
+
+class ThermalEquivalence(Equivalence):
+    _type_name = "thermal"
+    dims = (temperature,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            return pc.kboltz*x
+        elif new_dims == temperature:
+            return x/pc.kboltz
+
+    def __str__(self):
+        return "thermal: temperature <-> energy"
+
+class MassEnergyEquivalence(Equivalence):
+    _type_name = "mass_energy"
+    dims = (mass,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            return x*pc.clight*pc.clight
+        elif new_dims == mass:
+            return x/(pc.clight*pc.clight)
+
+    def __str__(self):
+        return "mass_energy: mass <-> energy"
+
+class SpectralEquivalence(Equivalence):
+    _type_name = "spectral"
+    dims = (length,rate,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            if x.units.dimensions == length:
+                nu = pc.clight/x
+            elif x.units.dimensions == rate:
+                nu = x
+            return pc.hcgs*nu
+        elif new_dims == length:
+            if x.units.dimensions == rate:
+                return pc.clight/x
+            elif x.units.dimensions == energy:
+                return pc.hcgs*pc.clight/x
+        elif new_dims == rate:
+            if x.units.dimensions == length:
+                return pc.clight/x
+            elif x.units.dimensions == energy:
+                return x/pc.hcgs
+
+    def __str__(self):
+        return "spectral: length <-> rate <-> energy"
+

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -99,12 +99,9 @@
     # misc
     "eV": (erg_per_eV, dimensions.energy),
     "amu": (amu_grams, dimensions.mass),
-    "me": (mass_electron_grams, dimensions.mass),
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
-    "kB": (boltzmann_constant_erg_per_K,
-           dimensions.energy/dimensions.temperature),
     "photons": (1.0, dimensions.dimensionless),
 
     # for AstroPy compatibility
@@ -215,7 +212,7 @@
     dimensions.current_mks:'A',
 }
 
-unit_equivalences = {
+cgs_conversions = {
     "C":("mks","esu"),
     "T":("mks","gauss"),
     "A":("mks","statA"),

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -26,7 +26,7 @@
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
-    mks_base_units, unit_equivalences
+    mks_base_units, cgs_conversions
 from yt.units.unit_registry import UnitRegistry
 
 import copy
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry","equivalence","is_equiv_mks"]
+                 "registry","cgs_conversion","is_mks"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -208,25 +208,26 @@
         obj.dimensions = dimensions
         obj.registry = registry
 
-        equivalences = []
+        conversions = []
         is_mks = []
         for atom in unit_expr.atoms():
             ua = str(atom)
             possible_prefix = ua[0]
-            if possible_prefix in unit_prefixes:
-                ua = ua[1:]
-            if ua in unit_equivalences:
-                unit_sys, equiv = unit_equivalences[ua]
+            symbol_wo_prefix = ua[1:]
+            if possible_prefix in unit_prefixes and symbol_wo_prefix in prefixable_units:
+                ua = symbol_wo_prefix
+            if ua in cgs_conversions:
+                unit_sys, conv = cgs_conversions[ua]
                 is_mks.append(unit_sys == "mks")
-                equivalences.append((atom, symbols(equiv)))
-        if len(equivalences) > 0:
-            equivalence = unit_expr.subs(equivalences)
-            equivalence = Unit(unit_expr=equivalence, cgs_value=1.0,
+                conversions.append((atom, symbols(conv)))
+        if len(conversions) > 0:
+            conversion = unit_expr.subs(conversions)
+            conversion = Unit(unit_expr=conversion, cgs_value=1.0,
                                dimensions=None, registry=registry)
         else:
-            equivalence = None
-        obj.equivalence = equivalence
-        obj.is_equiv_mks = all(is_mks)
+            conversion = None
+        obj.cgs_conversion = conversion
+        obj.is_mks = all(is_mks)
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -362,10 +363,10 @@
         """ Test if dimensions are the same. """
         first_check = False
         second_check = False
-        if self.equivalence:
-            first_check = self.equivalence.dimensions / other_unit.dimensions == sympy_one
-        if other_unit.equivalence:
-            second_check = other_unit.equivalence.dimensions / self.dimensions == sympy_one
+        if self.cgs_conversion:
+            first_check = self.cgs_conversion.dimensions / other_unit.dimensions == sympy_one
+        if other_unit.cgs_conversion:
+            second_check = other_unit.cgs_conversion.dimensions / self.dimensions == sympy_one
         if first_check or second_check:
             return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
@@ -400,7 +401,7 @@
         Create and return dimensionally-equivalent cgs units.
 
         """
-        units = self.equivalence if self.equivalence else self
+        units = self.cgs_conversion if self.cgs_conversion else self
         units_string = units._get_system_unit_string(cgs_base_units)
         return Unit(units_string, cgs_value=1.0,
                     dimensions=units.dimensions, registry=self.registry)
@@ -410,7 +411,7 @@
         Create and return dimensionally-equivalent mks units.
 
         """
-        units = self.equivalence if self.equivalence and not self.is_equiv_mks else self
+        units = self.cgs_conversion if self.cgs_conversion and not self.is_mks else self
         units_string = units._get_system_unit_string(mks_base_units)
         cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
                      get_conversion_factor(units, Unit(units_string))[0])

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/units/unit_symbols.py
--- a/yt/units/unit_symbols.py
+++ b/yt/units/unit_symbols.py
@@ -140,7 +140,6 @@
 MeV = mega_electron_volt = quan(1.0, "MeV")
 GeV = giga_electron_volt = quan(1.0, "GeV")
 amu = atomic_mass_unit = quan(1.0, "amu")
-me  = electron_mass = quan(1.0, "me")
 angstrom = quan(1.0, "angstrom")
 
 #

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -33,10 +33,12 @@
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
-    YTUfuncUnitError, YTIterableUnitCoercionError
+    YTUfuncUnitError, YTIterableUnitCoercionError, \
+    YTInvalidUnitEquivalence
 from numbers import Number as numeric_type
 from yt.utilities.on_demand_imports import _astropy
 from sympy import Rational
+from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
 
 # redefine this here to avoid a circular import from yt.funcs
 def iterable(obj):
@@ -488,6 +490,28 @@
         """
         return self.in_units(self.units.get_mks_equivalent())
 
+    def in_equivalent(self, unit, equiv):
+        from equivalencies import equivalence_registry
+        this_equiv = equivalence_registry[equiv]()
+        possible_prefix = unit[0]
+        symbol_wo_prefix = unit[1:]
+        if possible_prefix in unit_prefixes and symbol_wo_prefix in prefixable_units:
+            u = unit[1:]
+        else:
+            u = unit
+        old_dims = self.units.dimensions
+        new_dims = self.units.registry.lut[u][1]
+        if old_dims in this_equiv.dims and new_dims in this_equiv.dims:
+            return this_equiv.convert(self, new_dims).in_units(unit)
+        else:
+            raise YTInvalidUnitEquivalence(equiv, self.units, unit)
+
+    def list_equivalencies(self):
+        from equivalencies import equivalence_registry
+        for k,v in equivalence_registry.items():
+            if self.units.dimensions in v.dims:
+                print v()
+
     def ndarray_view(self):
         """
         Returns a view into the array, but as an ndarray rather than ytarray.

diff -r 6408072eab62317e9c10bcfcc08fcaf2af7154f8 -r 75663105d4d8982bc377ef8438fcf7c0f23910be yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -415,4 +415,13 @@
         self.filename = filename
 
     def __str__(self):
-        return "A file already exists at %s and clobber=False." % self.filename
\ No newline at end of file
+        return "A file already exists at %s and clobber=False." % self.filename
+
+class YTInvalidUnitEquivalence(Exception):
+    def __init__(self, equiv, unit1, unit2):
+        self.equiv = equiv
+        self.unit1 = unit1
+        self.unit2 = unit2
+
+    def __str__(self):
+        return "The unit equivalence '%s' does not exist for %s and %s." % (self.equiv, self.unit1, self.unit2)
\ No newline at end of file
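
A minimal usage sketch of the conversion API introduced above (an illustration under assumptions, not part of the changeset; it relies on the "thermal" equivalence defined in equivalencies.py earlier in this series, and in_equivalent is renamed to to_equivalent in the next changeset):

    import yt

    T = yt.YTQuantity(1.0e7, "K")
    E = T.in_equivalent("keV", "thermal")   # k_B*T expressed in keV (~0.86 keV here)
    T.list_equivalencies()                  # prints each equivalence whose dims include temperature
    # Requesting a unit outside the equivalence's dimensions raises the new exception:
    # T.in_equivalent("g", "thermal")  ->  YTInvalidUnitEquivalence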


https://bitbucket.org/yt_analysis/yt/commits/959fbed8d384/
Changeset:   959fbed8d384
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:25:29+00:00
Summary:     Adding SoundSpeed equivalence, docstrings
Affected #:  2 files

diff -r 75663105d4d8982bc377ef8438fcf7c0f23910be -r 959fbed8d38409a8ca597db7ddadff9ba8003d0e yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -15,6 +15,7 @@
 import yt.utilities.physical_constants as pc
 from yt.units.dimensions import temperature, mass, energy, length, rate
 from yt.extern.six import add_metaclass
+import numpy as np
 
 equivalence_registry = {}
 
@@ -79,3 +80,23 @@
     def __str__(self):
         return "spectral: length <-> rate <-> energy"
 
+class SoundSpeedEquivalence(Equivalence):
+    _type_name = "sound_speed"
+    dims = (velocity,temperature,energy,)
+
+    def convert(self, x, new_dims, mu=0.6, gamma=5./3.):
+        if new_dims == velocity:
+            if x.units.dimensions == temperature:
+                kT = pc.kboltz*x
+            elif x.units.dimensions == energy:
+                kT = x
+            return np.sqrt(gamma*kT/(mu*pc.mh))
+        else:
+            kT = x*x*mu*pc.mh/gamma
+            if new_dims == temperature:
+                return kT/pc.kboltz
+            else:
+                return kT
+
+    def __str__(self):
+        return "sound_speed: velocity <-> temperature <-> energy"

diff -r 75663105d4d8982bc377ef8438fcf7c0f23910be -r 959fbed8d38409a8ca597db7ddadff9ba8003d0e yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -490,7 +490,25 @@
         """
         return self.in_units(self.units.get_mks_equivalent())
 
-    def in_equivalent(self, unit, equiv):
+    def to_equivalent(self, unit, equiv, **kwargs):
+        """
+        Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
+        related by only a constant factor but not in the same units.
+
+        Parameters
+        ----------
+        unit : string
+            The unit that you wish to convert to.
+        equiv : string
+            The equivalence you wish to use. To see which equivalencies are
+            supported for this unitful quantity, try the :meth:`list_equivalencies`
+            method.
+
+        Examples
+        --------
+        >>> a = yt.YTArray(1.0e7,"K")
+        >>> a.to_equivalent("keV", "thermal")
+        """
         from equivalencies import equivalence_registry
         this_equiv = equivalence_registry[equiv]()
         possible_prefix = unit[0]
@@ -502,11 +520,15 @@
         old_dims = self.units.dimensions
         new_dims = self.units.registry.lut[u][1]
         if old_dims in this_equiv.dims and new_dims in this_equiv.dims:
-            return this_equiv.convert(self, new_dims).in_units(unit)
+            return this_equiv.convert(self, new_dims, **kwargs).in_units(unit)
         else:
             raise YTInvalidUnitEquivalence(equiv, self.units, unit)
 
     def list_equivalencies(self):
+        """
+        Lists the possible equivalencies associated with this YTArray or
+        YTQuantity.
+        """
         from equivalencies import equivalence_registry
         for k,v in equivalence_registry.items():
             if self.units.dimensions in v.dims:
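
A hedged sketch of how the new keyword arguments flow through to_equivalent into SoundSpeedEquivalence.convert (not part of the commit; it also assumes the missing velocity import, which is added two changesets below):

    import yt

    T = yt.YTQuantity(1.0e7, "K")
    c_s = T.to_equivalent("km/s", "sound_speed")                       # defaults: gamma=5/3, mu=0.6
    c_s2 = T.to_equivalent("km/s", "sound_speed", gamma=4./3., mu=0.5)
    T_back = c_s.to_equivalent("K", "sound_speed")                     # round-trips with the same defaults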


https://bitbucket.org/yt_analysis/yt/commits/1be337877891/
Changeset:   1be337877891
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:34:42+00:00
Summary:     Need velocity units
Affected #:  1 file

diff -r 959fbed8d38409a8ca597db7ddadff9ba8003d0e -r 1be33787789130f502a38a2a467765db9ca42c3c yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -1,6 +1,5 @@
 """
-Base dimensions
-
+Equivalencies between different kinds of units
 
 """
 
@@ -13,7 +12,8 @@
 #-----------------------------------------------------------------------------
 
 import yt.utilities.physical_constants as pc
-from yt.units.dimensions import temperature, mass, energy, length, rate
+from yt.units.dimensions import temperature, mass, energy, length, rate, \
+    velocity
 from yt.extern.six import add_metaclass
 import numpy as np
 


https://bitbucket.org/yt_analysis/yt/commits/7d2d0228ed8f/
Changeset:   7d2d0228ed8f
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:34:48+00:00
Summary:     Typo
Affected #:  1 file

diff -r 1be33787789130f502a38a2a467765db9ca42c3c -r 7d2d0228ed8f610cf0af974ca7059cbaa5117502 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -461,7 +461,7 @@
             return ratio, ratio*old_units.cgs_offset - new_units.cgs_offset
         else:
             raise InvalidUnitOperation(
-                "Fahrenheit and Celsius are not absoulte temperature scales "
+                "Fahrenheit and Celsius are not absolute temperature scales "
                 "and cannot be used in compound unit symbols.")
 
 #


https://bitbucket.org/yt_analysis/yt/commits/740382c2e577/
Changeset:   740382c2e577
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:35:00+00:00
Summary:     Make this more general
Affected #:  1 file

diff -r 7d2d0228ed8f610cf0af974ca7059cbaa5117502 -r 740382c2e577f783fe53782a16c5db6310bdeaf6 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -518,7 +518,7 @@
         else:
             u = unit
         old_dims = self.units.dimensions
-        new_dims = self.units.registry.lut[u][1]
+        new_dims = YTQuantity(1.0, unit, registry=self.units.registry).units.dimensions
         if old_dims in this_equiv.dims and new_dims in this_equiv.dims:
             return this_equiv.convert(self, new_dims, **kwargs).in_units(unit)
         else:


https://bitbucket.org/yt_analysis/yt/commits/da8fa976bab4/
Changeset:   da8fa976bab4
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:36:26+00:00
Summary:     Making this a bit more descriptive
Affected #:  1 file

diff -r 740382c2e577f783fe53782a16c5db6310bdeaf6 -r da8fa976bab492b6a08bfec9cd70772e886d1606 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,4 +424,6 @@
         self.unit2 = unit2
 
     def __str__(self):
-        return "The unit equivalence '%s' does not exist for %s and %s." % (self.equiv, self.unit1, self.unit2)
\ No newline at end of file
+        return "The unit equivalence '%s' does not exist for the units '%s' and '%s.'" % (self.equiv,
+                                                                                          self.unit1,
+                                                                                          self.unit2)
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/d8cec28facf9/
Changeset:   d8cec28facf9
Branch:      yt
User:        jzuhone
Date:        2014-10-17 14:43:03+00:00
Summary:     This isn't necessary anymore.
Affected #:  1 file

diff -r da8fa976bab492b6a08bfec9cd70772e886d1606 -r d8cec28facf99304c8f7417dec9d367d622d5afa yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -511,12 +511,6 @@
         """
         from equivalencies import equivalence_registry
         this_equiv = equivalence_registry[equiv]()
-        possible_prefix = unit[0]
-        symbol_wo_prefix = unit[1:]
-        if possible_prefix in unit_prefixes and symbol_wo_prefix in prefixable_units:
-            u = unit[1:]
-        else:
-            u = unit
         old_dims = self.units.dimensions
         new_dims = YTQuantity(1.0, unit, registry=self.units.registry).units.dimensions
         if old_dims in this_equiv.dims and new_dims in this_equiv.dims:


https://bitbucket.org/yt_analysis/yt/commits/3205ee643de5/
Changeset:   3205ee643de5
Branch:      yt
User:        jzuhone
Date:        2014-10-21 00:30:25+00:00
Summary:     Lorentz equivalence
Affected #:  1 file

diff -r d8cec28facf99304c8f7417dec9d367d622d5afa -r 3205ee643de591dbdbbf7b1d6e1cbe5225d47acb yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -13,7 +13,7 @@
 
 import yt.utilities.physical_constants as pc
 from yt.units.dimensions import temperature, mass, energy, length, rate, \
-    velocity
+    velocity, dimensionless
 from yt.extern.six import add_metaclass
 import numpy as np
 
@@ -100,3 +100,18 @@
 
     def __str__(self):
         return "sound_speed: velocity <-> temperature <-> energy"
+
+class LorentzEquivalence(Equivalence):
+    _type_name = "lorentz"
+    dims = (dimensionless,velocity,)
+
+    def convert(self, x, new_dims):
+        if new_dims == dimensionless:
+            beta = x.in_cgs()/pc.clight
+            return 1./np.sqrt(1.-beta**2)
+        elif new_dims == velocity:
+            return pc.clight*np.sqrt(1.-1./(x*x))
+
+    def __str__(self):
+        return "lorentz: velocity <-> dimensionless"
+
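
A rough illustration of the Lorentz equivalence above, mirroring gamma = 1/sqrt(1 - (v/c)^2) (a sketch only, not part of the commit):

    from yt.utilities.physical_constants import clight

    v = 0.5*clight
    gamma = v.to_equivalent("dimensionless", "lorentz")   # 1/sqrt(1 - 0.25) ~= 1.1547
    v_back = gamma.to_equivalent("km/s", "lorentz")       # recovers ~0.5c, expressed in km/s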


https://bitbucket.org/yt_analysis/yt/commits/b5e0bfd7ab83/
Changeset:   b5e0bfd7ab83
Branch:      yt
User:        jzuhone
Date:        2014-10-21 00:31:30+00:00
Summary:     Conversion and equivalence tests. Fixed a couple of bugs I introduced in the temperature tests.
Affected #:  1 file

diff -r 3205ee643de591dbdbbf7b1d6e1cbe5225d47acb -r b5e0bfd7ab83642769d5903312c549a961ebb857 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -473,7 +473,7 @@
     balmy_C = YTQuantity(26.85, 'degC')
     balmy_R = YTQuantity(540, 'R')
 
-    assert_array_almost_equal(balmy.in_units('defF'), balmy_F)
+    assert_array_almost_equal(balmy.in_units('degF'), balmy_F)
     assert_array_almost_equal(balmy.in_units('degC'), balmy_C)
     assert_array_almost_equal(balmy.in_units('R'), balmy_R)
 
@@ -491,7 +491,7 @@
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)
 
-    balmy.convert_to_units('F')
+    balmy.convert_to_units('degF')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
 
@@ -856,3 +856,73 @@
 
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+def test_cgs_conversions():
+    from yt.utilities.physical_constants import qp, eps_0, clight
+    from yt.units.dimensions import current_mks, time
+
+    qp_mks = qp.in_units("C")
+    yield assert_equal, qp_mks.units.dimensions, current_mks*time
+    yield assert_array_almost_equal, qp_mks.in_cgs(), qp
+
+    K = 1.0/(4*np.pi*eps_0)
+    yield assert_array_almost_equal, K.in_cgs(), 1.0
+
+    B = YTQuantity(1.0,"T")
+    yield assert_array_almost_equal, B.in_units("gauss"), YTQuantity(1.0e4, "gauss")
+
+    I = YTQuantity(1.0,"A")
+    yield assert_array_almost_equal, I.in_units("statA"), YTQuantity(0.1*clight, "statA")
+
+def test_equivalencies():
+    from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh
+
+    # Mass-energy
+
+    E = mp.to_equivalent("keV","mass_energy")
+    yield assert_equal, E, mp*clight*clight
+    yield assert_array_almost_equal, mp, E.to_equivalent("g", "mass_energy")
+
+    # Thermal
+
+    T = YTQuantity(1.0e8,"K")
+    E = T.to_equivalent("W*hr","thermal")
+    yield assert_equal, E, (kboltz*T).in_units("W*hr")
+    yield assert_array_almost_equal, T, E.to_equivalent("K", "thermal")
+
+    # Spectral
+
+    l = YTQuantity(4000.,"angstrom")
+    nu = l.to_equivalent("Hz","spectral")
+    yield assert_equal, nu, clight/l
+    E = hcgs*nu
+    l2 = E.to_equivalent("angstrom", "spectral")
+    yield assert_array_almost_equal, l, l2
+    nu2 = clight/l2.in_units("cm")
+    yield assert_array_almost_equal, nu, nu2
+    E2 = nu2.to_equivalent("keV", "spectral")
+    yield assert_array_almost_equal, E2, E.in_units("keV")
+
+    # Sound-speed
+
+    mu = 0.6
+    gg = 5./3.
+    c_s = T.to_equivalent("km/s","sound_speed")
+    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
+    yield assert_array_almost_equal, T, c_s.to_equivalent("K","sound_speed")
+
+    mu = 0.5
+    gg = 4./3.
+    c_s = T.to_equivalent("km/s","sound_speed", mu=mu, gamma=gg)
+    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
+    yield assert_array_almost_equal, T, c_s.to_equivalent("K","sound_speed",
+                                                          mu=mu, gamma=gg)
+
+    # Lorentz
+
+    v = 0.8*clight
+    g = v.to_equivalent("dimensionless","lorentz")
+    g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
+    yield assert_array_almost_equal, g, g2
+    v2 = g2.to_equivalent("mile/hr", "lorentz")
+    yield assert_array_almost_equal, v2, v.in_units("mile/hr")


https://bitbucket.org/yt_analysis/yt/commits/92f35c77e40b/
Changeset:   92f35c77e40b
Branch:      yt
User:        jzuhone
Date:        2014-10-21 00:45:33+00:00
Summary:     Docs for unit equivalencies.
Affected #:  1 file

diff -r b5e0bfd7ab83642769d5903312c549a961ebb857 -r 92f35c77e40b190e19920f7574d1f364bcaea8fc doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
+  "signature": "sha256:6d22af645775ae666db3df46f89f4a257ddedcb2b646576cececbf99db0e7e03"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -36,7 +36,7 @@
      "input": [
       "import yt\n",
       "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
-      "          \n",
+      "\n",
       "dd = ds.all_data()\n",
       "maxval, maxloc = ds.find_max('density')\n",
       "\n",
@@ -302,6 +302,126 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Unit Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://www.astropy.org)). The possible unit equivalencies are:\n",
+      "\n",
+      "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
+      "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
+      "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
+      "* `\"sound_speed\"`: conversions between temperature and sound speed ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
+      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"erg\", \"thermal\")\n",
+      "print dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\")\n",
+      "\n",
+      "# Rest energy of the proton\n",
+      "from yt.utilities.physical_constants import mp\n",
+      "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
+      "print E_p"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import clight\n",
+      "v = 0.1*clight\n",
+      "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
+      "print g\n",
+      "print g.to_equivalent(\"c\", \"lorentz\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since the sound speed also depends on the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$, these may be optionally supplied to the `to_equivalent` call (defaults are $\\gamma = 5/3$, $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
+      "try:\n",
+      "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
+      "except YTInvalidUnitEquivalence, e:\n",
+      "    print e"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "E_p.list_equivalencies()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Round-Trip Conversions to and from AstroPy's Units System"
      ]
     },
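
Because every equivalence is a registered subclass of Equivalence, a new one can be prototyped in a few lines. The class below is purely hypothetical (an electron de Broglie relation is not part of these commits) and only sketches the _type_name/dims/convert contract, assuming Equivalence is importable from yt.units.equivalencies:

    import yt.utilities.physical_constants as pc
    from yt.units.dimensions import length, velocity
    from yt.units.equivalencies import Equivalence

    class ElectronDeBroglieEquivalence(Equivalence):
        # Hypothetical example: lambda = h/(m_e*v) for a non-relativistic electron.
        _type_name = "electron_de_broglie"
        dims = (length, velocity,)

        def convert(self, x, new_dims):
            # The relation is symmetric in x, so one expression covers both directions.
            return pc.hcgs/(pc.me*x)

        def __str__(self):
            return "electron_de_broglie: velocity <-> length"

Assuming the registry metaclass picks up subclasses automatically (as the built-in equivalencies rely on), yt.YTQuantity(1.0e8, "cm/s").to_equivalent("angstrom", "electron_de_broglie") would then behave like any other equivalence.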


https://bitbucket.org/yt_analysis/yt/commits/ee3f88b7ca51/
Changeset:   ee3f88b7ca51
Branch:      yt
User:        jzuhone
Date:        2014-10-21 04:41:03+00:00
Summary:     Docs on cgs conversions
Affected #:  2 files

diff -r 92f35c77e40b190e19920f7574d1f364bcaea8fc -r ee3f88b7ca513559817e5465bdc53f17c1c2bfbb doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:6d22af645775ae666db3df46f89f4a257ddedcb2b646576cececbf99db0e7e03"
+  "signature": "sha256:60fd867cad7a689b3b2642f1076a7618bf13842f8585e68db83060961f72f18e"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -222,6 +222,69 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Electrostatic/Electromagnetic Units"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Electromagnetic units can be a bit tricky, because the units for such quantities in different unit systems can have entirely different dimensions, even if they are meant to represent the same physical quantities. For example, in the SI system of units, current in Amperes is a fundamental unit of measure, so the unit of charge \"coulomb\" is equal to one ampere-second. On the other hand, in the Gaussian/CGS system, there is no equivalent base electromagnetic unit, and the electrostatic charge unit \"esu\" is equal to one $\mathrm{cm^{3/2}g^{1/2}s^{-1}}$ (which does not have any apparent physical significance). `yt` recognizes this difference:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "q1 = yt.YTArray(1.0,\"C\") # Coulombs\n",
+      "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
+      "\n",
+      "print \"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions\n",
+      "print \"units =\", q2.in_cgs().units, \", dims =\", q2.units.dimensions"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Under the hood, the `yt` units system has a translation layer that converts between these two systems, without any further effort required. For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import qp\n",
+      "\n",
+      "print qp\n",
+      "qp_C = qp.in_units(\"C\")\n",
+      "print qp_C"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The electromagnetic unit translations `yt` understands are:\n",
+      "\n",
+      "* Charge: 1 coulomb (C) $\\leftrightarrow$ 0.1c electrostatic unit (esu, Fr)\n",
+      "* Current: 1 ampere (A, C/s) $\leftrightarrow$ 0.1c statampere (statA, esu/s, Fr/s) \n",
+      "* Magnetic Field: 1 tesla (T) $\\leftrightarrow 10^4$ gauss (G)\n",
+      "\n",
+      "where \"Fr\" is the franklin, an alternative name for the electrostatic unit, and c is the speed of light. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Working with views and converting to ndarray"
      ]
     },
@@ -391,9 +454,10 @@
      "collapsed": false,
      "input": [
       "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
+      "\n",
       "try:\n",
       "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
-      "except YTInvalidUnitEquivalence, e:\n",
+      "except YTInvalidUnitEquivalence as e:\n",
       "    print e"
      ],
      "language": "python",
@@ -444,10 +508,9 @@
      "collapsed": false,
      "input": [
       "from astropy import units as u\n",
-      "from yt import YTQuantity, YTArray\n",
       "\n",
       "x = 42.0 * u.meter\n",
-      "y = YTQuantity.from_astropy(x) "
+      "y = yt.YTQuantity.from_astropy(x) "
      ],
      "language": "python",
      "metadata": {},
@@ -469,7 +532,7 @@
      "collapsed": false,
      "input": [
       "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray.from_astropy(a)"
+      "b = yt.YTArray.from_astropy(a)"
      ],
      "language": "python",
      "metadata": {},
@@ -556,7 +619,7 @@
      "collapsed": false,
      "input": [
       "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity.from_astropy(kb)\n",
+      "k2 = yt.YTQuantity.from_astropy(kb)\n",
       "print k1 == k2"
      ],
      "language": "python",
@@ -567,7 +630,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "c = YTArray.from_astropy(a)\n",
+      "c = yt.YTArray.from_astropy(a)\n",
       "d = c.to_astropy()\n",
       "print a == d"
      ],

diff -r 92f35c77e40b190e19920f7574d1f364bcaea8fc -r ee3f88b7ca513559817e5465bdc53f17c1c2bfbb yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -868,10 +868,10 @@
     K = 1.0/(4*np.pi*eps_0)
     yield assert_array_almost_equal, K.in_cgs(), 1.0
 
-    B = YTQuantity(1.0,"T")
+    B = YTQuantity(1.0, "T")
     yield assert_array_almost_equal, B.in_units("gauss"), YTQuantity(1.0e4, "gauss")
 
-    I = YTQuantity(1.0,"A")
+    I = YTQuantity(1.0, "A")
     yield assert_array_almost_equal, I.in_units("statA"), YTQuantity(0.1*clight, "statA")
 
 def test_equivalencies():
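
A short interactive sketch of the translation layer documented above (illustration only; the expected values mirror the doc table and the tests in this changeset):

    import yt

    q = yt.YTQuantity(1.0, "C")
    print q.in_units("esu")      # ~2.998e9 esu, i.e. 0.1*c with c in cm/s
    B = yt.YTQuantity(1.0, "T")
    print B.in_units("gauss")    # 1.0e4 gauss
    I = yt.YTQuantity(1.0, "A")
    print I.in_units("statA")    # ~2.998e9 statA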


https://bitbucket.org/yt_analysis/yt/commits/021c2d730c17/
Changeset:   021c2d730c17
Branch:      yt
User:        jzuhone
Date:        2014-10-21 04:43:41+00:00
Summary:     Fix link
Affected #:  1 file

diff -r ee3f88b7ca513559817e5465bdc53f17c1c2bfbb -r 021c2d730c17ce3a5d98d64900a3029614a2fa65 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:60fd867cad7a689b3b2642f1076a7618bf13842f8585e68db83060961f72f18e"
+  "signature": "sha256:1e75c69b70dcfd31489cb9e891de015e1dd7c892e7697b5d65553358af2ce7a3"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -372,7 +372,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://www.astropy.org)). The possible unit equivalencies are:\n",
+      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
       "\n",
       "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
       "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",


https://bitbucket.org/yt_analysis/yt/commits/2923cb1471ad/
Changeset:   2923cb1471ad
Branch:      yt
User:        jzuhone
Date:        2014-10-24 17:46:20+00:00
Summary:     Schwarzschild and Compton equivalencies
Affected #:  3 files

diff -r 021c2d730c17ce3a5d98d64900a3029614a2fa65 -r 2923cb1471ad70197d31a19839ff9b450eda490a doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:1e75c69b70dcfd31489cb9e891de015e1dd7c892e7697b5d65553358af2ce7a3"
+  "signature": "sha256:873ef1e917aa6f7916369be75a37f3e9422340ea71a65b79193ce763de2eadaa"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -378,7 +378,9 @@
       "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
       "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
       "* `\"sound_speed\"`: conversions between temperature and sound speed ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
-      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)"
+      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
+      "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
+      "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)"
      ]
     },
     {

diff -r 021c2d730c17ce3a5d98d64900a3029614a2fa65 -r 2923cb1471ad70197d31a19839ff9b450eda490a yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -115,3 +115,26 @@
     def __str__(self):
         return "lorentz: velocity <-> dimensionless"
 
+class SchwarzschildEquivalence(Equivalence):
+    _type_name = "schwarzschild"
+    dims = (mass,length,)
+
+    def convert(self, x, new_dims):
+        if new_dims == length:
+            return 2.*pc.G*x/(pc.clight*pc.clight)
+        elif new_dims == mass:
+            return 0.5*x*pc.clight*pc.clight/pc.G
+
+    def __str__(self):
+        return "schwarzschild: mass <-> length"
+
+class ComptonEquivalence(Equivalence):
+    _type_name = "compton"
+    dims = (mass,length,)
+
+    def convert(self, x, new_dims):
+        return pc.hcgs/(x*pc.clight)
+
+    def __str__(self):
+        return "compton: mass <-> length"
+

diff -r 021c2d730c17ce3a5d98d64900a3029614a2fa65 -r 2923cb1471ad70197d31a19839ff9b450eda490a yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -875,7 +875,8 @@
     yield assert_array_almost_equal, I.in_units("statA"), YTQuantity(0.1*clight, "statA")
 
 def test_equivalencies():
-    from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh
+    from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh, me, \
+        mass_sun_cgs, G
 
     # Mass-energy
 
@@ -926,3 +927,15 @@
     yield assert_array_almost_equal, g, g2
     v2 = g2.to_equivalent("mile/hr", "lorentz")
     yield assert_array_almost_equal, v2, v.in_units("mile/hr")
+
+    # Schwarzschild
+
+    R = mass_sun_cgs.to_equivalent("kpc","schwarzschild")
+    yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
+    yield assert_array_almost_equal, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
+
+    # Compton
+
+    l = me.to_equivalent("angstrom","compton")
+    yield assert_equal, l, hcgs/(me*clight)
+    yield assert_array_almost_equal, me, l.to_equivalent("g", "compton")
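
A hedged usage sketch of the two new equivalencies (not part of the commit; the quoted values are approximate):

    from yt.utilities.physical_constants import mass_sun_cgs, me

    R_s = mass_sun_cgs.to_equivalent("km", "schwarzschild")   # ~2.95 km for one solar mass
    lam = me.to_equivalent("angstrom", "compton")             # ~0.0243 angstrom, the electron Compton wavelength
    m_back = lam.to_equivalent("g", "compton")                # round-trips back to the electron mass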


https://bitbucket.org/yt_analysis/yt/commits/81d9c9336cd1/
Changeset:   81d9c9336cd1
Branch:      yt
User:        jzuhone
Date:        2014-10-24 18:35:26+00:00
Summary:     Merge
Affected #:  221 files

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -975,8 +986,11 @@
 
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
-    echo "Installing pure-python readline"
-    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
+    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    then
+        echo "Installing pure-python readline"
+        ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
+    fi
 fi
 
 if [ $INST_ENZO -eq 1 ]
@@ -1026,7 +1040,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -291,7 +291,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], method=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -304,4 +304,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -72,6 +72,8 @@
 * Quantities
 * Callbacks
 
+A list of all available filters, quantities, and callbacks can be found in 
+:ref:`halo_analysis_ref`.  
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
@@ -129,7 +131,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
 
@@ -154,6 +163,18 @@
    # ... Later on in your script
    hc.add_quantity("my_quantity") 
 
+This quantity will then be accessible for functions called later via the 
+*quantities* dictionary that is associated with the halo object.
+
+.. code-block:: python
+
+   def my_new_function(halo):
+       print halo.quantities["my_quantity"]
+   add_callback("print_quantity", my_new_function)
+
+   # ... Anywhere after "my_quantity" has been called
+   hc.add_callback("print_quantity")
+
 Callbacks
 ^^^^^^^^^
 
@@ -171,10 +192,10 @@
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
-halo_callbacks.py and issue a pull request
+halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
 

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See this cookbook recipe for an example: 
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -225,12 +225,12 @@
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
-    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
       (although it can be a subset of that volume specified by a data object
       with the ``data_source`` keyword).  Alternatively, one can specify 
-      a weight_field and different ``style`` values to change the nature
+      a weight_field and different ``method`` values to change the nature
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter itself.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less than 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8ba193cc3867e2185133bbf3952bd5834e6c63993208635c71cf55fa6f27b491"
+  "signature": "sha256:67eb4b2a3d1017bac09209ebc939e8c1fe154660fa15f76862019dfc8652ec32"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -305,9 +305,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Overriding Code Unit Definitions"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data), or you may want to change them outright. `yt` provides a mechanism for supplying your own code unit definitions to `load`, which override the default rules a given frontend uses to define code units. This is done through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n",
+      "print ds1.length_unit\n",
+      "print ds1.mass_unit\n",
+      "print ds1.time_unit\n",
+      "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp1[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                  \"time_unit\":(1.0,\"Myr\"),\n",
+      "                  \"mass_unit\":(1.0e14,\"Msun\")}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`units_override` can take the following keys:\n",
+      "\n",
+      "* `length_unit`\n",
+      "* `time_unit`\n",
+      "* `mass_unit`\n",
+      "* `magnetic_unit`\n",
+      "* `temperature_unit`\n",
+      "\n",
+      "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
+      "print ds2.length_unit\n",
+      "print ds2.mass_unit\n",
+      "print ds2.time_unit\n",
+      "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp2[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. "
+     ]
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

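As a quick illustration of the value forms units_override accepts, a sketch (the numbers are arbitrary; plain floats are interpreted in the corresponding cgs unit):

import yt
from yt.units.yt_array import YTQuantity

units_override = {"length_unit": (1.0, "Mpc"),          # (value, unit) tuple
                  "time_unit": YTQuantity(1.0, "Myr"),   # YTQuantity
                  "mass_unit": 1.989e47}                 # float, cgs grams (~1.0e14 Msun)

ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk", units_override=units_override)
print ds.length_unit, ds.mass_unit, ds.time_unit
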
diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 2923cb1471ad70197d31a19839ff9b450eda490a -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/244477624661/
Changeset:   244477624661
Branch:      yt
User:        jzuhone
Date:        2014-10-27 14:23:45+00:00
Summary:     Putting this back in
Affected #:  1 file

diff -r 81d9c9336cd142cd69f183ee8f5e3f0c73e179c9 -r 244477624661ffcfa7a9c5b0a9bf974128fb2b1f yt/units/unit_symbols.py
--- a/yt/units/unit_symbols.py
+++ b/yt/units/unit_symbols.py
@@ -141,6 +141,7 @@
 GeV = giga_electron_volt = quan(1.0, "GeV")
 amu = atomic_mass_unit = quan(1.0, "amu")
 angstrom = quan(1.0, "angstrom")
+me  = electron_mass = quan(1.0, "me")
 
 #
 # Angle units


https://bitbucket.org/yt_analysis/yt/commits/2fe72d656b88/
Changeset:   2fe72d656b88
Branch:      yt
User:        jzuhone
Date:        2014-10-27 14:40:25+00:00
Summary:     Making the code in here more clear
Affected #:  1 file

diff -r 244477624661ffcfa7a9c5b0a9bf974128fb2b1f -r 2fe72d656b886d3848bd318d987fb625fe295e8e yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry","cgs_conversion","is_mks"]
+                 "registry", "cgs_conversion", "is_mks"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -401,7 +401,10 @@
         Create and return dimensionally-equivalent cgs units.
 
         """
-        units = self.cgs_conversion if self.cgs_conversion else self
+        if self.cgs_conversion:
+            units = self.cgs_conversion
+        else:
+            units = self
         units_string = units._get_system_unit_string(cgs_base_units)
         return Unit(units_string, cgs_value=1.0,
                     dimensions=units.dimensions, registry=self.registry)
@@ -411,7 +414,10 @@
         Create and return dimensionally-equivalent mks units.
 
         """
-        units = self.cgs_conversion if self.cgs_conversion and not self.is_mks else self
+        if self.cgs_conversion and not self.is_mks:
+            units = self.cgs_conversion
+        else:
+            units = self
         units_string = units._get_system_unit_string(mks_base_units)
         cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
                      get_conversion_factor(units, Unit(units_string))[0])


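The effect of this logic can be seen by asking a Unit for its dimensional equivalents; a brief sketch, assuming the two methods above are get_cgs_equivalent and get_mks_equivalent (only the former appears by name in this hunk):

from yt.units.unit_object import Unit

u = Unit("gauss")
print u.get_cgs_equivalent()   # expressed in cgs base units
print u.get_mks_equivalent()   # dimensionally-equivalent mks expression (Tesla)
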
https://bitbucket.org/yt_analysis/yt/commits/a6c5a8c2e5df/
Changeset:   a6c5a8c2e5df
Branch:      yt
User:        jzuhone
Date:        2014-10-27 14:41:31+00:00
Summary:     Changing the names of these variables
Affected #:  3 files

diff -r 2fe72d656b886d3848bd318d987fb625fe295e8e -r a6c5a8c2e5dfe91a608234951c09406c2db89b18 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -20,8 +20,8 @@
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
     cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
     boltzmann_constant_erg_per_K, kelvin_per_rankine, \
-    speed_of_light_cm_per_s, planck_length, planck_charge, \
-    planck_energy, planck_mass, planck_temperature, planck_time
+    speed_of_light_cm_per_s, planck_length_cm, planck_charge_esu, \
+    planck_energy_erg, planck_mass_grams, planck_temperature_K, planck_time_s
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -118,12 +118,12 @@
     "Angstrom": (cm_per_ang, dimensions.length),
 
     # Planck units
-    "m_pl": (planck_mass, dimensions.mass),
-    "l_pl": (planck_length, dimensions.length),
-    "t_pl": (planck_time, dimensions.time),
-    "T_pl": (planck_temperature, dimensions.temperature),
-    "q_pl": (planck_charge, dimensions.charge_cgs),
-    "E_pl": (planck_energy, dimensions.energy),
+    "m_pl": (planck_mass_grams, dimensions.mass),
+    "l_pl": (planck_length_cm, dimensions.length),
+    "t_pl": (planck_time_s, dimensions.time),
+    "T_pl": (planck_temperature_K, dimensions.temperature),
+    "q_pl": (planck_charge_esu, dimensions.charge_cgs),
+    "E_pl": (planck_energy_erg, dimensions.energy),
 
 }
 

diff -r 2fe72d656b886d3848bd318d987fb625fe295e8e -r a6c5a8c2e5dfe91a608234951c09406c2db89b18 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -54,12 +54,12 @@
 Na = 1 / amu_cgs
 
 #Planck units
-m_pl = YTQuantity(planck_mass, "g")
-l_pl = YTQuantity(planck_length, "cm")
-t_pl = YTQuantity(planck_time, "s")
-E_pl = YTQuantity(planck_energy, "erg")
-q_pl = YTQuantity(planck_charge, "esu")
-T_pl = YTQuantity(planck_temperature, "K")
+m_pl = planck_mass = YTQuantity(planck_mass_grams, "g")
+l_pl = planck_length = YTQuantity(planck_length_cm, "cm")
+t_pl = planck_time = YTQuantity(planck_time_s, "s")
+E_pl = planck_energy = YTQuantity(planck_energy_erg, "erg")
+q_pl = planck_charge = YTQuantity(planck_charge_esu, "esu")
+T_pl = planck_temperature = YTQuantity(planck_temperature_K, "K")
 
 mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
 eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("C**2/N/m**2")

diff -r 2fe72d656b886d3848bd318d987fb625fe295e8e -r a6c5a8c2e5dfe91a608234951c09406c2db89b18 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -111,9 +111,9 @@
 TINY = 1.0e-40
 
 # Planck units
-planck_mass = 2.17650925245e-05
-planck_length = 1.6161992557e-33
-planck_time = planck_length / speed_of_light_cm_per_s
-planck_energy = planck_mass * speed_of_light_cm_per_s * speed_of_light_cm_per_s
-planck_temperature = planck_energy / boltzmann_constant_erg_per_K
-planck_charge = 5.62274532302e-09
+planck_mass_grams = 2.17650925245e-05
+planck_length_cm = 1.6161992557e-33
+planck_time_s = planck_length_cm / speed_of_light_cm_per_s
+planck_energy_erg = planck_mass_grams * speed_of_light_cm_per_s * speed_of_light_cm_per_s
+planck_temperature_K = planck_energy_erg / boltzmann_constant_erg_per_K
+planck_charge_esu = 5.62274532302e-09


https://bitbucket.org/yt_analysis/yt/commits/2fe559f093e8/
Changeset:   2fe559f093e8
Branch:      yt
User:        jzuhone
Date:        2014-10-27 15:36:09+00:00
Summary:     Adding "G" to the prefixable_units table.
Affected #:  1 file

diff -r a6c5a8c2e5dfe91a608234951c09406c2db89b18 -r 2fe559f093e86a27681dbc3f41a3dd7b23b48e3f yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -188,6 +188,7 @@
     "Hz",
     "W",
     "gauss",
+    "G",
     "Jy",
     "N",
     "T",


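With "G" in prefixable_units, SI-prefixed gauss strings parse directly; a small sketch:

from yt.units.yt_array import YTQuantity

b = YTQuantity(3.0, "uG")      # microgauss, enabled by making "G" prefixable
print b.in_units("gauss")
print b.in_units("mG")         # milligauss
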
https://bitbucket.org/yt_analysis/yt/commits/a1bb0f4f0383/
Changeset:   a1bb0f4f0383
Branch:      yt
User:        jzuhone
Date:        2014-10-27 20:11:01+00:00
Summary:     Number density and density dimensions
Affected #:  1 file

diff -r 2fe559f093e86a27681dbc3f41a3dd7b23b48e3f -r a1bb0f4f0383f734a05f596fe065b18c981e1f20 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -46,6 +46,8 @@
 power    = energy / time
 flux     = power / area
 specific_flux = flux / rate
+number_density = 1/(length*length*length)
+density = mass * number_density
 
 # Gaussian electromagnetic units
 charge_cgs  = (energy * length)**Rational(1, 2)  # proper 1/2 power
@@ -58,7 +60,7 @@
 electric_field_mks = force / charge_mks
 magnetic_field_mks = electric_field_mks / velocity
 
-# Since cgs is our default, I'm adding these shortcuts for backwards-compatibility
+# Since cgs is our default, I'm adding these aliases for backwards-compatibility
 charge = charge_cgs
 electric_field = electric_field_cgs
 magnetic_field = magnetic_field_cgs


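A short sanity check of the new dimension symbols (both comparisons should hold under sympy's automatic simplification):

from yt.units.dimensions import mass, length, density, number_density

# number_density = 1/length**3 and density = mass * number_density,
# so density reduces to the familiar mass/length**3.
print number_density == 1/length**3
print density == mass/length**3
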
https://bitbucket.org/yt_analysis/yt/commits/6a873c08b1f2/
Changeset:   6a873c08b1f2
Branch:      yt
User:        jzuhone
Date:        2014-10-28 05:56:57+00:00
Summary:     Adding this back in
Affected #:  1 file

diff -r a1bb0f4f0383f734a05f596fe065b18c981e1f20 -r 6a873c08b1f2701af5b9538b00725c3f67eb335b yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -103,6 +103,7 @@
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
     "photons": (1.0, dimensions.dimensionless),
+    "me": (mass_electron_grams, dimensions.mass),
 
     # for AstroPy compatibility
     "solMass": (mass_sun_grams, dimensions.mass),


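With "me" back in the lookup table (and the matching symbol in unit_symbols.py from the earlier changeset), electron masses work like any other mass unit; a brief sketch:

from yt.units.yt_array import YTQuantity
from yt.units.unit_symbols import me

print YTQuantity(1.0, "me").in_units("g")   # electron mass in grams
print (2*me).in_units("amu")                # and in atomic mass units
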
https://bitbucket.org/yt_analysis/yt/commits/4f594476a7bb/
Changeset:   4f594476a7bb
Branch:      yt
User:        jzuhone
Date:        2014-10-28 05:57:16+00:00
Summary:     Number density equivalence
Affected #:  1 file

diff -r 6a873c08b1f2701af5b9538b00725c3f67eb335b -r 4f594476a7bb600c695e9f33b0cd105c3d1e27dc yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -13,7 +13,7 @@
 
 import yt.utilities.physical_constants as pc
 from yt.units.dimensions import temperature, mass, energy, length, rate, \
-    velocity, dimensionless
+    velocity, dimensionless, density, number_density
 from yt.extern.six import add_metaclass
 import numpy as np
 
@@ -29,6 +29,16 @@
 class Equivalence(object):
     _skip_add = False
 
+class NumberDensityEquivalence(Equivalence):
+    _type_name = "number_density"
+    dims = (density,number_density,)
+
+    def convert(self, x, new_dims, mu=0.6):
+        if new_dims == number_density:
+            return x/(mu*pc.mh)
+        elif new_dims == density:
+            return x*mu*pc.mh
+
 class ThermalEquivalence(Equivalence):
     _type_name = "thermal"
     dims = (temperature,energy,)
@@ -99,7 +109,7 @@
                 return kT
 
     def __str__(self):
-        return "sound_speed: velocity <-> temperature <-> energy"
+        return "sound_speed (ideal gas): velocity <-> temperature <-> energy"
 
 class LorentzEquivalence(Equivalence):
     _type_name = "lorentz"


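A sketch of how the new equivalence would be used from a data container (the dataset is illustrative; mu defaults to 0.6):

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
dd = ds.all_data()

# density <-> number density via n = rho/(mu*m_H); pass mu to override the default
print dd["density"].to_equivalent("cm**-3", "number_density")
print dd["density"].to_equivalent("cm**-3", "number_density", mu=1.2)
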
https://bitbucket.org/yt_analysis/yt/commits/b2d0c9271d5e/
Changeset:   b2d0c9271d5e
Branch:      yt
User:        jzuhone
Date:        2014-10-28 05:57:25+00:00
Summary:     elementary_charge alias
Affected #:  1 file

diff -r 4f594476a7bb600c695e9f33b0cd105c3d1e27dc -r b2d0c9271d5e643007a4fbd2633c3e137780616b yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -15,6 +15,7 @@
 
 # Charge
 charge_proton_cgs = YTQuantity(4.8032056e-10, 'esu')
+elementary_charge = charge_proton_cgs
 
 # Physical Constants
 boltzmann_constant_cgs = YTQuantity(boltzmann_constant_erg_per_K, 'erg/K')


https://bitbucket.org/yt_analysis/yt/commits/3ae60b972813/
Changeset:   3ae60b972813
Branch:      yt
User:        jzuhone
Date:        2014-10-28 05:57:58+00:00
Summary:     Giving unit equivalencies their own notebook in the docs
Affected #:  4 files

diff -r b2d0c9271d5e643007a4fbd2633c3e137780616b -r 3ae60b97281378dba58b60415a6924d586ded905 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:873ef1e917aa6f7916369be75a37f3e9422340ea71a65b79193ce763de2eadaa"
+  "signature": "sha256:c7cfb2db456d127bb633b7eee7ad6fe14290aa622ac62694c7840d80137afaba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -257,11 +257,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.utilities.physical_constants import qp\n",
+      "from yt.utilities.physical_constants import elementary_charge\n",
       "\n",
-      "print qp\n",
-      "qp_C = qp.in_units(\"C\")\n",
-      "print qp_C"
+      "print elementary_charge\n",
+      "elementary_charge_C = elementary_charge.in_units(\"C\")\n",
+      "print elementary_charge_C"
      ],
      "language": "python",
      "metadata": {},
@@ -365,129 +365,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "Unit Equivalencies"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
-      "\n",
-      "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
-      "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
-      "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
-      "* `\"sound_speed\"`: conversions between temperature and sound speed ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
-      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
-      "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
-      "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd[\"temperature\"].to_equivalent(\"erg\", \"thermal\")\n",
-      "print dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\")\n",
-      "\n",
-      "# Rest energy of the proton\n",
-      "from yt.utilities.physical_constants import mp\n",
-      "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
-      "print E_p"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.utilities.physical_constants import clight\n",
-      "v = 0.1*clight\n",
-      "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
-      "print g\n",
-      "print g.to_equivalent(\"c\", \"lorentz\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since the sound speed also depends on the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$, these may be optionally supplied to the `to_equivalent` call (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
-      "\n",
-      "try:\n",
-      "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
-      "except YTInvalidUnitEquivalence as e:\n",
-      "    print e"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "E_p.list_equivalencies()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Round-Trip Conversions to and from AstroPy's Units System"
      ]
     },

diff -r b2d0c9271d5e643007a4fbd2633c3e137780616b -r 3ae60b97281378dba58b60415a6924d586ded905 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -0,0 +1,132 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:540508ad9cef9e81722ee4a6d7ed148df117fde2467b22dcf856cb6a4c01b00f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
+      "\n",
+      "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
+      "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
+      "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
+      "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
+      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
+      "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
+      "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n",
+      "\n",
+      "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "\n",
+      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
+      "\n",
+      "dd = ds.all_data()\n",
+      "\n",
+      "print dd[\"temperature\"].to_equivalent(\"erg\", \"thermal\")\n",
+      "print dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\")\n",
+      "\n",
+      "# Rest energy of the proton\n",
+      "from yt.utilities.physical_constants import mp\n",
+      "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
+      "print E_p"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import clight\n",
+      "v = 0.1*clight\n",
+      "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
+      "print g\n",
+      "print g.to_equivalent(\"c\", \"lorentz\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since the sound speed for an ideal gas also depends on the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$, these may be optionally supplied to the `to_equivalent` call (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
+      "\n",
+      "try:\n",
+      "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
+      "except YTInvalidUnitEquivalence as e:\n",
+      "    print e"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "E_p.list_equivalencies()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r b2d0c9271d5e643007a4fbd2633c3e137780616b -r 3ae60b97281378dba58b60415a6924d586ded905 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -33,6 +33,7 @@
    comoving_units_and_code_units
    comparing_units_from_different_datasets
    units_and_plotting
+   unit_equivalencies
 
 .. note::
 

diff -r b2d0c9271d5e643007a4fbd2633c3e137780616b -r 3ae60b97281378dba58b60415a6924d586ded905 doc/source/analyzing/units/unit_equivalencies.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_equivalencies.rst
@@ -0,0 +1,7 @@
+.. _unit_equivalencies:
+
+Unit Equivalencies
+==================
+
+.. notebook:: 6)_Unit_Equivalencies.ipynb
+   :skip_exceptions:
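
For reference, the "thermal" equivalence exercised in the notebook above is the
conversion E = k_B * T. A minimal sketch of the same arithmetic with plain
YTQuantity objects (constant values approximate; keV assumed available via the
SI prefix on eV):

    from yt.units.yt_array import YTQuantity

    k_B = YTQuantity(1.3807e-16, "erg/K")   # Boltzmann constant (approximate)
    T = YTQuantity(1.0e7, "K")              # a hot, ICM-like temperature

    E = (k_B * T).in_units("erg")
    print(E)                                 # ~1.38e-09 erg
    print(E.in_units("keV"))                 # ~0.86 keV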


https://bitbucket.org/yt_analysis/yt/commits/ffce974e84c9/
Changeset:   ffce974e84c9
Branch:      yt
User:        jzuhone
Date:        2014-10-28 06:12:05+00:00
Summary:     Merge
Affected #:  46 files

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -52,7 +52,7 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.create()
-   ad = hc.all_data()
+   ad = hc.halos_ds.all_data()
    masses = ad['particle_mass'][:]
 
 

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/cookbook/gadget_notebook.rst
--- /dev/null
+++ b/doc/source/cookbook/gadget_notebook.rst
@@ -0,0 +1,7 @@
+.. _gadget-notebook:
+
+Using yt to view and analyze Gadget outputs
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. notebook:: yt_gadget_analysis.ipynb
+

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/cookbook/tipsy_and_yt.ipynb
--- a/doc/source/cookbook/tipsy_and_yt.ipynb
+++ b/doc/source/cookbook/tipsy_and_yt.ipynb
@@ -1,7 +1,16 @@
 {
  "metadata": {
+  "kernelspec": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "display_name": "IPython (Python 2)",
+   "language": "python",
+   "name": "python2"
+  },
   "name": "",
-  "signature": "sha256:2ae8b1599fa35495fa1bb8deb1c67094e3529e70093b30e20354122cd9403d9d"
+  "signature": "sha256:1f6e5cf50123ad75676f035a2a36cd60f4987832462907b9cb78cb25548d8afd"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -10,14 +19,6 @@
    "cells": [
     {
      "cell_type": "heading",
-     "level": 1,
-     "metadata": {},
-     "source": [
-      "Using yt to view and analyze Tipsy outputs from Gasoline"
-     ]
-    },
-    {
-     "cell_type": "heading",
      "level": 2,
      "metadata": {},
      "source": [
@@ -193,4 +194,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/cookbook/yt_gadget_analysis.ipynb
--- /dev/null
+++ b/doc/source/cookbook/yt_gadget_analysis.ipynb
@@ -0,0 +1,263 @@
+{
+ "metadata": {
+  "kernelspec": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "display_name": "IPython (Python 2)",
+   "language": "python",
+   "name": "python2"
+  },
+  "name": "",
+  "signature": "sha256:42e2b7cc4c70a501432f24bc0d62d0723605d50196399148dd365d28387dd55d"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Loading the data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First we set up our imports:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "import yt.units as units\n",
+      "import pylab"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next we load the data set, specifying the unit length, mass, and velocity, as well as the size of the bounding box (which should encapsulate all the particles in the data set).\n",
+      "\n",
+      "At the end, we flatten the data into \"ad\" in case we want access to the raw simulation data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      ">This dataset is available for download at http://yt-project.org/data/GadgetDiskGalaxy.tar.gz (430 MB)."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fname = 'GadgetDiskGalaxy/snapshot_200.hdf5'\n",
+      "\n",
+      "unit_base = {'UnitLength_in_cm'         : 3.08568e+21,\n",
+      "             'UnitMass_in_g'            :   1.989e+43,\n",
+      "             'UnitVelocity_in_cm_per_s' :      100000}\n",
+      "\n",
+      "bbox_lim = 1e5 #kpc\n",
+      "\n",
+      "bbox = [[-bbox_lim,bbox_lim],\n",
+      "        [-bbox_lim,bbox_lim],\n",
+      "        [-bbox_lim,bbox_lim]]\n",
+      " \n",
+      "ds = yt.load(fname,unit_base=unit_base,bounding_box=bbox)\n",
+      "ds.index\n",
+      "ad= ds.all_data()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's make a projection plot to look at the entire volume"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "px = yt.ProjectionPlot(ds, 'x', ('gas', 'density'))\n",
+      "px.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's print some quantities about the domain, as well as the physical properties of the simulation\n"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print 'left edge: ',ds.domain_left_edge\n",
+      "print 'right edge: ',ds.domain_right_edge\n",
+      "print 'center: ',ds.domain_center"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also see the fields that are available to query in the dataset"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sorted(ds.field_list)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's create a data object that represents the full simulation domain, and find the total mass in gas and dark matter particles contained in it:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# total_mass returns a list, representing the total gas and dark matter + stellar mass, respectively\n",
+      "print [tm.in_units('Msun') for tm in ad.quantities.total_mass()]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now let's say we want to zoom in on the box (since clearly the bounding box we chose initially is much larger than the volume containing the gas particles!), and center on wherever the highest gas density peak is.  First, let's find this peak:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "density = ad[(\"PartType0\",\"density\")]\n",
+      "wdens = np.where(density == np.max(density))\n",
+      "coordinates = ad[(\"PartType0\",\"Coordinates\")]\n",
+      "center = coordinates[wdens][0]\n",
+      "print 'center = ',center"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Set up the box to zoom into"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_box_size = ds.quan(250,'code_length')\n",
+      "\n",
+      "left_edge = center - new_box_size/2\n",
+      "right_edge = center + new_box_size/2\n",
+      "\n",
+      "print new_box_size.in_units('Mpc')\n",
+      "print left_edge.in_units('Mpc')\n",
+      "print right_edge.in_units('Mpc')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad2= ds.region(center=center, left_edge=left_edge, right_edge=right_edge)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating the total mass in gas and particles contained in the subvolume:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print [tm.in_units('Msun') for tm in ad2.quantities.total_mass()]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And then by visualizing what the new zoomed region looks like"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "px = yt.ProjectionPlot(ds, 'x', ('gas', 'density'), center=center, width=new_box_size)\n",
+      "px.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Cool - there's a disk galaxy there!"
+     ]
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
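
The zoom-in recipe in the notebook above (find the densest gas particle, then
carve out a region around it) condenses into a small helper. A hedged sketch;
zoom_region is an illustrative name, and np.argmax stands in for the np.where
lookup used in the notebook:

    import numpy as np

    def zoom_region(ds, size_code_length=250):
        # locate the densest gas particle and build a cube of the given size around it
        ad = ds.all_data()
        density = ad[("PartType0", "density")]
        coords = ad[("PartType0", "Coordinates")]
        center = coords[np.argmax(density)]
        half = ds.quan(size_code_length, "code_length") / 2
        return ds.region(center=center,
                         left_edge=center - half,
                         right_edge=center + half)

    # ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5", unit_base=unit_base,
    #              bounding_box=bbox)
    # ad2 = zoom_region(ds)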

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -7,14 +7,14 @@
             have a question about making a custom derived quantity, please
             contact the mailing list.
 
-yt is designed to support analysis and visualization of data from multiple
-different simulation codes, although it has so far been most successfully
-applied to Adaptive Mesh Refinement (AMR) data. For a list of codes and the
-level of support they enjoy, see :ref:`code-support`.
+yt is designed to support analysis and visualization of data from
+multiple different simulation codes. For a list of codes and the level
+of support they enjoy, see :ref:`code-support`.
 
-We'd like to support a broad range of codes, both AMR-based and otherwise. To
-add support for a new code, a few things need to be put into place. These
-necessary structures can be classified into a couple categories:
+We'd like to support a broad range of codes, both Adaptive Mesh
+Refinement (AMR)-based and otherwise. To add support for a new code, a
+few things need to be put into place. These necessary structures can
+be classified into a couple categories:
 
  * Data meaning: This is the set of parameters that convert the data into
    physically relevant units; things like spatial and mass conversions, time
@@ -33,73 +33,147 @@
 If you are interested in adding a new code, be sure to drop us a line on
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
-To get started, make a new directory in ``yt/frontends`` with the name of your
-code -- you can start by copying into it the contents of the ``stream``
-directory, which is a pretty empty format. You'll then have to create a subclass
-of ``Dataset``. This subclass will need to handle conversion between the
-different physical units and the code units; for the most part, the examples of
-``OrionDataset`` and ``EnzoDataset`` should be followed, but
-``ChomboDataset``, as a slightly newer addition, can also be used as an
-instructive example -- be sure to add an ``_is_valid`` classmethod that will
-verify if a filename is valid for that output type, as that is how "load" works.
+To get started, make a new directory in ``yt/frontends`` with the name
+of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+directory will add a lot of boilerplate for the required classes and
+methods that are needed.  In particular, you'll have to create a
+subclass of ``Dataset`` in the data_structures.py file. This subclass
+will need to handle conversion between the different physical units
+and the code units (typically in the ``_set_code_unit_attributes()``
+method), read in metadata describing the overall data on disk (via the
+``_parse_parameter_file()`` method), and provide a ``classmethod``
+called ``_is_valid()`` that lets the ``yt.load`` method help identify an
+input file as belonging to *this* particular ``Dataset`` subclass.
+For the most part, the examples of
+``yt.frontends.boxlib.data_structures.OrionDataset`` and
+``yt.frontends.enzo.data_structures.EnzoDataset`` should be followed,
+but ``yt.frontends.chombo.data_structures.ChomboDataset``, as a
+slightly newer addition, can also be used as an instructive example.
 
-A new set of fields must be added in the file ``fields.py`` in that directory.
-For the most part this means subclassing ``CodeFieldInfoContainer`` and adding
-the necessary fields specific to that code. Here is the Chombo field container:
+A new set of fields must be added in the file ``fields.py`` in your
+new directory.  For the most part this means subclassing 
+``FieldInfoContainer`` and adding the necessary fields specific to
+your code. Here is a snippet from the base BoxLib field container:
 
 .. code-block:: python
 
-    from UniversalFields import *
-    class ChomboFieldContainer(CodeFieldInfoContainer):
-        _shared_state = {}
-        _field_list = {}
-    ChomboFieldInfo = ChomboFieldContainer()
-    add_chombo_field = ChomboFieldInfo.add_field
+    from yt.fields.field_info_container import FieldInfoContainer
+    class BoxlibFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ("density", (rho_units, ["density"], None)),
+	    ("eden", (eden_units, ["energy_density"], None)),
+	    ("xmom", (mom_units, ["momentum_x"], None)),
+	    ("ymom", (mom_units, ["momentum_y"], None)),
+	    ("zmom", (mom_units, ["momentum_z"], None)),
+	    ("temperature", ("K", ["temperature"], None)),
+	    ("Temp", ("K", ["temperature"], None)),
+	    ("x_velocity", ("cm/s", ["velocity_x"], None)),
+	    ("y_velocity", ("cm/s", ["velocity_y"], None)),
+	    ("z_velocity", ("cm/s", ["velocity_z"], None)),
+	    ("xvel", ("cm/s", ["velocity_x"], None)),
+	    ("yvel", ("cm/s", ["velocity_y"], None)),
+	    ("zvel", ("cm/s", ["velocity_z"], None)),
+	)
 
-The field container is a shared state object, which is why we explicitly set
-``_shared_state`` equal to a mutable.
+	known_particle_fields = (
+	    ("particle_mass", ("code_mass", [], None)),
+	    ("particle_position_x", ("code_length", [], None)),
+	    ("particle_position_y", ("code_length", [], None)),
+	    ("particle_position_z", ("code_length", [], None)),
+	    ("particle_momentum_x", (mom_units, [], None)),
+	    ("particle_momentum_y", (mom_units, [], None)),
+	    ("particle_momentum_z", (mom_units, [], None)),
+	    ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
+	    ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
+	    ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
+	    ("particle_id", ("", ["particle_index"], None)),
+	    ("particle_mdot", ("code_mass/code_time", [], None)),
+	)
+
+The tuples ``known_other_fields`` and ``known_particle_fields``
+contain entries, which are tuples of the form ``("name", ("units",
+["fields", "to", "alias"], "display_name"))``.  ``"name"`` is the name
+of a field stored on-disk in the dataset. ``"units"`` corresponds to
+the units of that field.  The list ``["fields", "to", "alias"]``
+allows you to specify additional aliases to this particular field; for
+example, if your on-disk field for the x-direction velocity were
+``"x-direction-velocity"``, maybe you'd prefer to alias to the more
+terse name of ``"xvel"``.  ``"display_name"`` is an optional parameter
+that can be used to specify how you want the field to be displayed on
+a plot; this can be LaTeX code, for example the density field could
+have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
+will result in using a capitalized version of the ``"name"``.
 
 Data Localization Structures
 ----------------------------
 
-As of right now, the "grid patch" mechanism is going to remain in yt, however in
-the future that may change. As such, some other output formats -- like Gadget --
-may be shoe-horned in, slightly.
+These functions and classes let yt know about how the arrangement of
+data on disk corresponds to the physical arrangement of data within
+the simulation.  yt has grid datastructures for handling both
+patch-based and octree-based AMR codes.  The terms 'patch-based'
+and 'octree-based' are used somewhat loosely here.  For example,
+traditionally, the FLASH code used the paramesh AMR library, which is
+based on a tree structure, but the FLASH frontend in yt utilizes yt's
+patch-based datastructures.  It is up to the frontend developer to
+determine which yt datastructures best match the datastructures of
+their simulation code.
 
-Hierarchy
-^^^^^^^^^
+Both approaches -- patch-based and octree-based -- have a concept of a
+*Hierarchy* or *Index* (used somewhat interchangeably in the code) of
+datastructures and something that describes the elements that make up
+the Hierarchy or Index.  For patch-based codes, the Index is a
+collection of ``AMRGridPatch`` objects that describe a block of zones.
+For octree-based codes, the Index contains datastructures that hold
+information about the individual octs, namely an ``OctreeContainer``.
 
-To set up data localization, an ``AMRHierarchy`` subclass must be added in the
-file ``data_structures.py``. The index object must override the following
-methods:
+Hierarchy or Index
+^^^^^^^^^^^^^^^^^^
 
- * ``_detect_fields``: ``self.field_list`` must be populated as a list of
-   strings corresponding to "native" fields in the data files.
- * ``_setup_classes``: it's probably safe to crib this from one of the other
-   ``AMRHierarchy`` subclasses.
- * ``_count_grids``: this must set self.num_grids to be the total number of
-   grids in the simulation.
- * ``_parse_index``: this must fill in ``grid_left_edge``,
+To set up data localization, a ``GridIndex`` subclass for patch-based
+codes or an ``OctreeIndex`` subclass for octree-based codes must be
+added in the file ``data_structures.py``. Examples of these different
+types of ``Index`` can be found in, for example, the
+``yt.frontends.chombo.data_structures.ChomboHierarchy`` for patch-based
+codes and ``yt.frontends.ramses.data_structures.RAMSESIndex`` for
+octree-based codes.  
+
+For the most part, the ``GridIndex`` subclass must override (at a
+minimum) the following methods:
+
+ * ``_detect_output_fields()``: ``self.field_list`` must be populated as a list
+   of strings corresponding to "native" fields in the data files.
+ * ``_count_grids()``: this must set ``self.num_grids`` to be the total number
+   of grids (equivalently ``AMRGridPatch``'es) in the simulation.
+ * ``_parse_index()``: this must fill in ``grid_left_edge``,
    ``grid_right_edge``, ``grid_particle_count``, ``grid_dimensions`` and
-   ``grid_levels`` with the appropriate information. Additionally, ``grids``
-   must be an array of grid objects that already know their IDs.
- * ``_populate_grid_objects``: this initializes the grids by calling
-   ``_prepare_grid`` and ``_setup_dx`` on all of them.  Additionally, it should
-   set up ``Children`` and ``Parent`` lists on each grid object.
- * ``_setup_unknown_fields``: If a field is in the data file that yt doesn't
-   already know, this is where you make a guess at it.
- * ``_setup_derived_fields``: ``self.derived_field_list`` needs to be made a
-   list of strings that correspond to all derived fields valid for this
-   index.
+   ``grid_levels`` with the appropriate information.  Each of these variables 
+   is an array, with an entry for each of the ``self.num_grids`` grids.  
+   Additionally, ``grids``  must be an array of ``AMRGridPatch`` objects that 
+   already know their IDs.
+ * ``_populate_grid_objects()``: this initializes the grids by calling
+   ``_prepare_grid()`` and ``_setup_dx()`` on all of them.  Additionally, it 
+   should set up ``Children`` and ``Parent`` lists on each grid object.
 
-For the most part, the ``ChomboHierarchy`` should be the first place to look for
-hints on how to do this; ``EnzoHierarchy`` is also instructive.
+The ``OctreeIndex`` has somewhat analogous methods, but often with
+different names; both ``OctreeIndex`` and ``GridIndex`` are subclasses
+of the ``Index`` class.  In particular, for the ``OctreeIndex``, the
+method ``_initialize_oct_handler()`` sets up much of the oct
+metadata that is analogous to the grid metadata created in the
+``GridIndex`` methods ``_count_grids()``, ``_parse_index()``, and
+``_populate_grid_objects()``.
 
 Grids
 ^^^^^
 
-A new grid object, subclassing ``AMRGridPatch``, will also have to be added.
-This should go in ``data_structures.py``. For the most part, this may be all
+.. note:: This section only applies to the approach using yt's patch-based
+	  datastructures.  For the octree-based approach, one does not create
+	  a grid object, but rather an ``OctreeSubset``, which has methods
+	  for filling out portions of the octree structure.  Again, see the
+	  code in ``yt.frontends.ramses.data_structures`` for an example of
+	  the octree approach.
+
+A new grid object, subclassing ``AMRGridPatch``, will also have to be added in
+``data_structures.py``. For the most part, this may be all
 that is needed:
 
 .. code-block:: python
@@ -115,32 +189,46 @@
             self.Level = level
 
 
-Even the most complex grid object, ``OrionGrid``, is still relatively simple.
+Even one of the more complex grid objects,
+``yt.frontends.boxlib.BoxlibGrid``, is still relatively simple.
 
 Data Reading Functions
 ----------------------
 
-In ``io.py``, there are a number of IO handlers that handle the mechanisms by
-which data is read off disk.  To implement a new data reader, you must subclass
-``BaseIOHandler`` and override the following methods:
+In ``io.py``, there are a number of IO handlers that handle the
+mechanisms by which data is read off disk.  To implement a new data
+reader, you must subclass ``BaseIOHandler``.  The various frontend IO
+handlers are stored in an IO registry - essentially a dictionary that
+uses the name of the frontend as a key, and the specific IO handler as
+a value.  It is important, therefore, to set the ``dataset_type``
+attribute of your subclass, which is what is used as the key in the IO
+registry.  For example:
 
- * ``_read_field_names``: this routine accepts a grid object and must return all
-   the fields in the data file affiliated with that grid. It is used at the
-   initialization of the ``AMRHierarchy`` but likely not later.
- * ``modify``: This accepts a field from a data file and returns it ready to be
-   used by yt. This is used in Enzo data for preloading.
- * ``_read_data_set``: This accepts a grid object and a field name and must
-   return that field, ready to be used by yt as a NumPy array. Note that this
-   presupposes that any actions done in ``modify`` (above) have been executed.
- * ``_read_data_slice``: This accepts a grid object, a field name, an axis and
-   an (integer) coordinate, and it must return a slice through the array at that
-   value.
- * ``preload``: (optional) This accepts a list of grids and a list of datasets
-   and it populates ``self.queue`` (a dict keyed by grid id) with dicts of
-   datasets.
- * ``_read_exception``: (property) This is a tuple of exceptions that can be
-   raised by the data reading to indicate a field does not exist in the file.
+.. code-block:: python
 
+    class IOHandlerBoxlib(BaseIOHandler):
+        _dataset_type = "boxlib_native"
+	...
+
+At a minimum, one should also override the following methods
+
+* ``_read_fluid_selection()``: this receives a collection of data "chunks", a 
+  selector describing which "chunks" you are concerned with, a list of fields,
+  and the size of the data to read.  It should create and return a dictionary 
+  whose keys are the fields, and whose values are numpy arrays containing the 
+  data.  The data should actually be read via the ``_read_chunk_data()`` 
+  method.
+* ``_read_chunk_data()``: this method receives a "chunk" of data along with a 
+  list of fields we want to read.  It loops over all the grid objects within 
+  the "chunk" of data and reads from disk the specific fields, returning a 
+  dictionary whose keys are the fields and whose values are numpy arrays of
+  the data.
+
+If your dataset has particle information, you'll want to override the
+``_read_particle_coords()`` and ``_read_particle_fields()`` methods as
+well.  Each code is going to read data from disk in a different
+fashion, but the ``yt.frontends.boxlib.io.IOHandlerBoxlib`` is a
+decent place to start.
 
 And that just about covers it. Please feel free to email
 `yt-users <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_ or
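
A schematic sketch of the IO-handler layout described above, loosely patterned
on the BoxLib handler the text points to; IOHandlerMyFormat, "myformat_native",
and read_grid_field() are placeholders for this sketch, not yt API:

    import numpy as np
    from yt.utilities.io_handler import BaseIOHandler

    def read_grid_field(grid, field):
        # placeholder for the real on-disk read; returns zeros shaped like the grid
        return np.zeros(grid.ActiveDimensions, dtype="float64")

    class IOHandlerMyFormat(BaseIOHandler):
        _dataset_type = "myformat_native"   # key under which this handler is registered

        def _read_chunk_data(self, chunk, fields):
            # read every requested field for every grid object in this chunk
            return dict((g.id, dict((f, read_grid_field(g, f)) for f in fields))
                        for g in chunk.objs)

        def _read_fluid_selection(self, chunks, selector, fields, size):
            # one flat output array per field; the selector copies in the cells it
            # wants via the grid's select(), as in the BoxLib handler
            rv = dict((f, np.empty(size, dtype="float64")) for f in fields)
            offset = 0
            for chunk in chunks:
                data = self._read_chunk_data(chunk, fields)
                for g in chunk.objs:
                    nd = 0
                    for f in fields:
                        nd = g.select(selector, data[g.id][f], rv[f], offset)
                    offset += nd
            return rv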

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1010,6 +1010,8 @@
 onto the grid, you can also effectively mimic what your data would look like at
 lower resolution.
 
+See :ref:`gadget-notebook` for an example.
+
 .. _loading-tipsy-data:
 
 Tipsy Data

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -40,14 +40,15 @@
 
 .. code-block:: python
 
-   from yt.config import ytcfg
-   ytcfg["yt", "loglevel"] = "1"
+   import yt
+   yt.funcs.mylog.setLevel(1)
 
-   from yt.mods import *
-   ds = load("my_data0001")
+   ds = yt.load("my_data0001")
    ds.print_stats()
 
-This has the same effect as setting ``loglevel = 1`` in the configuration file.
+This has the same effect as setting ``loglevel = 1`` in the configuration
+file. Note that a log level of 1 means that all log messages are printed to
+stdout.  To disable logging, set the log level to 50.
 
 Setting Configuration On the Command Line
 -----------------------------------------

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/reference/faq/index.rst
--- a/doc/source/reference/faq/index.rst
+++ b/doc/source/reference/faq/index.rst
@@ -214,26 +214,37 @@
 
 The plugin file is a means of modifying the available fields, quantities, data
 objects and so on without modifying the source code of yt.  The plugin file
-will be executed if it is detected, and it must be:
+will be executed if it is detected.  It must be located in a ``.yt`` folder
+in your home directory and be named ``my_plugins.py``:
 
 .. code-block:: bash
 
    $HOME/.yt/my_plugins.py
 
-The code in this file can thus add fields, add derived quantities, add
+The code in this file can add fields, define functions, define
 datatypes, and on and on.  It is executed at the bottom of ``yt.mods``, and so
-it is provided with the entire namespace available in the module ``yt.mods`` --
-which is the primary entry point to yt, and which contains most of the
-functionality of yt.  For example, if I created a plugin file containing:
+it is provided with the entire namespace available in the module ``yt.mods``.
+For example, if I created a plugin file containing:
 
 .. code-block:: python
 
    def _myfunc(field, data):
        return np.random.random(data["density"].shape)
-   add_field("SomeQuantity", function=_myfunc)
+   add_field("some_quantity", function=_myfunc, units='')
 
-then all of my data objects would have access to the field "SomeQuantity"
-despite its lack of use.
+then all of my data objects would have access to the field "some_quantity".
+Note that the units must be specified as a string, see
+:ref:`data_selection_and_fields` for more details on units and derived fields.
+
+.. note::
+
+   Since the ``my_plugins.py`` is parsed inside of ``yt.mods``, you must import
+   yt using ``yt.mods`` to use the plugins file.  If you import using
+   ``import yt``, the plugins file will not be parsed.  You can tell that your
+   plugins file is being parsed by watching for a logging message when you
+   import yt.  Note that both the ``yt load`` and ``iyt`` command line entry
+   points invoke ``from yt.mods import *``, so the ``my_plugins.py`` file
+   will be parsed if you enter yt that way.
 
 You can also define other convenience functions in your plugin file.  For
 instance, you could define some variables or functions, and even import common
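
Putting this answer together, a hedged sketch of a complete $HOME/.yt/my_plugins.py
that defines the derived field above plus one convenience function (hot_gas is an
illustrative name; np and add_field are available because the plugin is executed
with the yt.mods namespace, as noted above):

    def _myfunc(field, data):
        # a random field, exactly as in the example above
        return np.random.random(data["density"].shape)

    add_field("some_quantity", function=_myfunc, units='')

    def hot_gas(ds, T_min=1.0e6):
        """Return a cut region containing only gas hotter than T_min (in K)."""
        ad = ds.all_data()
        return ad.cut_region(["obj['temperature'] > %g" % T_min])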

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -469,6 +469,21 @@
    slc.set_log('density', False)
    slc.save()
 
+Specifically, a field containing both positive and negative values can be plotted
+with a symlog scale by setting the boolean to ``True`` and providing an extra
+parameter ``linthresh``. In the region around zero (where the log scale diverges),
+a linear scale is applied to the interval ``(-linthresh, linthresh)`` and stretched
+relative to the logarithmic range. You can also plot a positive field on a symlog
+scale with the linear range ``(0, linthresh)``.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'x-velocity', width=(30,'kpc'))
+   slc.set_log('x-velocity', True, linthresh=1.e1)
+   slc.save()
+
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.
 
@@ -531,6 +546,26 @@
    slc.set_buff_size(1600)
    slc.save()
 
+Turning off minorticks
+~~~~~~~~~~~~~~~~~~~~~~
+
+By default minorticks for the x and y axes are turned on.
+The minorticks may be removed using the
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_minorticks`
+function, which accepts either a specific field name or the 'all' alias, along
+with the desired state for the plot as 'on' or 'off'. There is also an analogous
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_cbar_minorticks`
+function for the colorbar axis.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'))
+   slc.set_minorticks('all', 'off')
+   slc.set_cbar_minorticks('all', 'off')
+   slc.save()
+
 .. _matplotlib-customization:
 
 Further customization via matplotlib
@@ -743,7 +778,7 @@
 Adjusting the plot units does not require recreating the histogram, so adjusting
 units will always be inexpensive, requiring only an in-place unit conversion.
 
-In the following example we create a a plot of the average density in solar
+In the following example we create a plot of the average density in solar
 masses per cubic parsec as a function of radius in kiloparsecs.
 
 .. python-script::
@@ -892,7 +927,7 @@
 ``fractional`` keyword to ``True``.  When set to ``True``, the value in each bin
 is divided by the sum total from all bins.  These can be turned into cumulative
 distribution functions (CDFs) by setting the ``accumulation`` keyword to
-``True``.  This will make is so that the value in any bin N is the cumulative
+``True``.  This will make it so that the value in any bin N is the cumulative
 sum of all bins from 0 to N.  The direction of the summation can be reversed by
 setting ``accumulation`` to ``-True``.  For ``PhasePlot``, the accumulation can
 be set independently for each axis by setting ``accumulation`` to a list of
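
A minimal sketch of the ``fractional`` and ``accumulation`` keywords described
above, assuming ``ProfilePlot`` accepts them as stated; this builds a cumulative
mass fraction (a CDF of cell mass) as a function of radius:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # each bin holds the fraction of the total cell mass enclosed within that radius
    plot = yt.ProfilePlot(ad, "radius", "cell_mass", weight_field=None,
                          fractional=True, accumulation=True)
    plot.save()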

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -186,13 +186,13 @@
             # Simple error check to make sure more than 100% of box depth
             # is never required.
             if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
-                mylog.debug(("Warning: box fraction required to go from " +
+                mylog.error(("Warning: box fraction required to go from " +
                              "z = %f to %f is %f") %
                             (self.light_cone_solution[q]["redshift"], z_next,
                              self.light_cone_solution[q]["box_depth_fraction"]))
-                mylog.debug(("Full box delta z is %f, but it is %f to the " +
+                mylog.error(("Full box delta z is %f, but it is %f to the " +
                              "next data dump.") %
-                            (self.light_cone_solution[q]["deltazMax"],
+                            (self.light_cone_solution[q]["dz_max"],
                              self.light_cone_solution[q]["redshift"]-z_next))
 
             # Get projection axis and center.

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -169,7 +169,7 @@
                                 (self.light_ray_solution[q]['redshift'], z_next,
                                  self.light_ray_solution[q]['traversal_box_fraction']))
                     mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                                (self.light_ray_solution[q]['deltazMax'],
+                                (self.light_ray_solution[q]['dz_max'],
                                  self.light_ray_solution[q]['redshift']-z_next))
 
                 # Get dataset axis and center.

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -98,6 +98,8 @@
 class YTConfigParser(ConfigParser.ConfigParser):
     def __setitem__(self, key, val):
         self.set(key[0], key[1], val)
+    def __getitem__(self, key):
+        return self.get(key[0], key[1])
 
 if os.path.exists(os.path.expanduser("~/.yt/config")):
     ytcfg = YTConfigParser(ytcfg_defaults)

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -807,6 +807,8 @@
         self.fields = [k for k in self.field_data if k not in skip]
         if fields is not None:
             self.fields = ensure_list(fields) + self.fields
+        if len(self.fields) == 0:
+            raise ValueError("No fields found to plot in get_pw")
         (bounds, center, display_center) = \
             get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -542,7 +542,7 @@
     """
     _type_name = "data_collection"
     _con_args = ("_obj_list",)
-    def __init__(self, center, obj_list, ds = None, field_parameters = None):
+    def __init__(self, obj_list, ds=None, field_parameters=None, center=None):
         YTSelectionContainer3D.__init__(self, center, ds, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
                                 dtype="int64")

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/data_objects/tests/test_data_collection.py
--- a/yt/data_objects/tests/test_data_collection.py
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -8,7 +8,7 @@
     # We decompose in different ways
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs)
-        coll = ds.data_collection(ds.domain_center, ds.index.grids)
+        coll = ds.data_collection(ds.index.grids)
         crho = coll["density"].sum(dtype="float64").to_ndarray()
         grho = np.sum([g["density"].sum(dtype="float64") for g in ds.index.grids],
                       dtype="float64")
@@ -16,7 +16,7 @@
         yield assert_equal, coll.size, ds.domain_dimensions.prod()
         for gi in range(ds.index.num_grids):
             grids = ds.index.grids[:gi+1]
-            coll = ds.data_collection(ds.domain_center, grids)
+            coll = ds.data_collection(grids)
             crho = coll["density"].sum(dtype="float64")
             grho = np.sum([g["density"].sum(dtype="float64") for g in grids],
                           dtype="float64")

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/fields/api.py
--- a/yt/fields/api.py
+++ b/yt/fields/api.py
@@ -26,6 +26,11 @@
 from . import particle_fields
 #from . import species_fields
 from . import vector_operations
+from . import local_fields
+from . import my_plugin_fields
+
+from .local_fields import add_field, derived_field
+
 
 from .derived_field import \
     DerivedField, \
@@ -38,6 +43,3 @@
     FieldDetector
 from .field_info_container import \
     FieldInfoContainer
-
-from . import local_fields
-from .local_fields import add_field, derived_field

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/fields/my_plugin_fields.py
--- /dev/null
+++ b/yt/fields/my_plugin_fields.py
@@ -0,0 +1,31 @@
+"""
+This is a container for storing fields defined in the my_plugins.py file.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from .field_plugin_registry import \
+    register_field_plugin
+
+from .field_info_container import \
+    FieldInfoContainer
+
+# Empty FieldInfoContainer
+my_plugins_fields = FieldInfoContainer(None, [], None)
+
+ at register_field_plugin
+def setup_my_plugins_fields(registry, ftype="gas", slice_info=None):
+    # fields end up inside this container when added via add_field in
+    # my_plugins.py. See yt.funcs.enable_plugins to see how this is set up.
+    registry.update(my_plugins_fields)

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -19,8 +19,7 @@
       SkeletonDataset
 
 from .fields import \
-      SkeletonFieldInfo, \
-      add_skeleton_field
+      SkeletonFieldInfo
 
 from .io import \
       IOHandlerSkeleton

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,18 +13,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
+from .fields import SkeletonFieldInfo
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
@@ -41,20 +36,15 @@
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
-class SkeletonHierarchy(AMRHierarchy):
-
+class SkeletonHierarchy(GridIndex):
     grid = SkeletonGrid
     
     def __init__(self, ds, dataset_type='skeleton'):
         self.dataset_type = dataset_type
-        self.dataset = weakref.proxy(ds)
         # for now, the index file is the dataset!
         self.index_filename = self.dataset.parameter_filename
         self.directory = os.path.dirname(self.index_filename)
-        AMRHierarchy.__init__(self, ds, dataset_type)
-
-    def _initialize_data_storage(self):
-        pass
+        GridIndex.__init__(self, ds, dataset_type)
 
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -13,9 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-import h5py
-
 from yt.utilities.io_handler import \
     BaseIOHandler
 

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,3 +39,5 @@
         for frontend in _frontends:
             _mod = "yt.frontends.%s.api" % frontend
             setattr(self, frontend, importlib.import_module(_mod))
+        setattr(self, 'api', importlib.import_module('yt.frontends.api'))
+        setattr(self, '__name__', 'yt.frontends.api')

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ b/yt/frontends/art/api.py
@@ -24,3 +24,5 @@
 
 from .io import \
       IOHandlerART
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/artio/api.py
--- a/yt/frontends/artio/api.py
+++ b/yt/frontends/artio/api.py
@@ -22,3 +22,5 @@
 
 from .io import \
     IOHandlerARTIO
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/athena/api.py
--- a/yt/frontends/athena/api.py
+++ b/yt/frontends/athena/api.py
@@ -22,3 +22,5 @@
 
 from .io import \
       IOHandlerAthena
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -29,3 +29,5 @@
 
 from .io import \
       IOHandlerBoxlib
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -35,3 +35,5 @@
 from .io import \
     IOHandlerChomboHDF5,\
     IOHandlerPlutoHDF5
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -35,3 +35,5 @@
       IOHandlerInMemory, \
       IOHandlerPacked2D, \
       IOHandlerPacked1D
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -444,8 +444,7 @@
                 try:
                     gf = self.io._read_field_names(grid)
                 except self.io._read_exception:
-                    mylog.debug("Grid %s is a bit funky?", grid.id)
-                    continue
+                    raise IOError("Grid %s is a bit funky?" % grid.id)
                 mylog.debug("Grid %s has: %s", grid.id, gf)
                 field_list = field_list.union(gf)
             if "AppendActiveParticleType" in self.dataset.parameters:

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -203,11 +203,13 @@
                 units="code_velocity**2")
             # Subtract off B-field energy
             def _sub_b(field, data):
-                return data[te_name] - 0.5*(
-                    data["x-velocity"]**2.0
-                    + data["y-velocity"]**2.0
-                    + data["z-velocity"]**2.0 ) \
-                    - data["MagneticEnergy"]/data["Density"]
+                ret = data[te_name] - 0.5*data["x-velocity"]**2.0
+                if data.ds.dimensionality > 1:
+                    ret -= 0.5*data["y-velocity"]**2.0
+                if data.ds.dimensionality > 2:
+                    ret -= 0.5*data["z-velocity"]**2.0
+                ret -= data["MagneticEnergy"]/data["Density"]
+                return ret
             self.add_field(
                 ("gas", "thermal_energy"),
                 function=_sub_b, units = "erg/g")
@@ -217,10 +219,12 @@
                 units = "code_velocity**2")
             self.alias(("gas", "total_energy"), ("enzo", te_name))
             def _tot_minus_kin(field, data):
-                return data[te_name] - 0.5*(
-                    data["x-velocity"]**2.0
-                    + data["y-velocity"]**2.0
-                    + data["z-velocity"]**2.0 )
+                ret = data[te_name] - 0.5*data["x-velocity"]**2.0
+                if data.ds.dimensionality > 1:
+                    ret -= 0.5*data["y-velocity"]**2.0
+                if data.ds.dimensionality > 2:
+                    ret -= 0.5*data["z-velocity"]**2.0
+                return ret
             self.add_field(
                 ("gas", "thermal_energy"),
                 function = _tot_minus_kin,

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,7 +36,10 @@
     def _read_field_names(self, grid):
         if grid.filename is None: return []
         f = h5py.File(grid.filename, "r")
-        group = f["/Grid%08i" % grid.id]
+        try:
+            group = f["/Grid%08i" % grid.id]
+        except KeyError:
+            group = f
         fields = []
         add_io = "io" in grid.ds.particle_types
         for name, v in group.iteritems():
@@ -366,6 +369,8 @@
                     #print "Opening (count) %s" % g.filename
                     f = h5py.File(g.filename, "r")
                 gds = f.get("/Grid%08i" % g.id)
+                if gds is None:
+                    gds = f
                 for field in fields:
                     ftype, fname = field
                     ds = np.atleast_3d(gds.get(fname).value.transpose())

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -22,4 +22,6 @@
       IOHandlerFITS
 
 from .misc import \
-      setup_counts_fields
\ No newline at end of file
+      setup_counts_fields
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -23,3 +23,5 @@
 
 from .io import \
       IOHandlerFLASH
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/moab/api.py
--- a/yt/frontends/moab/api.py
+++ b/yt/frontends/moab/api.py
@@ -25,3 +25,5 @@
 
 from .io import \
       IOHandlerMoabH5MHex8
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ b/yt/frontends/ramses/api.py
@@ -24,3 +24,5 @@
 
 from .definitions import \
       field_aliases
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/sph/api.py
--- a/yt/frontends/sph/api.py
+++ b/yt/frontends/sph/api.py
@@ -31,3 +31,5 @@
       SPHFieldInfo, \
       TipsyFieldInfo,\
       EagleNetworkFieldInfo
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -31,3 +31,5 @@
 
 from .io import \
       IOHandlerStream
+
+from . import tests

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -701,7 +701,7 @@
                    field_units=None, bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
                    magnetic_unit=None, periodicity=(True, True, True),
-                   geometry = "cartesian"):
+                   geometry = "cartesian", refine_by=2):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -751,6 +751,8 @@
         each axis
     geometry : string
         "cartesian", "cylindrical" or "polar"
+    refine_by : integer
+        Specifies the refinement ratio between levels.  Defaults to 2.
 
     Examples
     --------
@@ -849,7 +851,7 @@
     handler.name = "AMRGridData"
     handler.domain_left_edge = domain_left_edge
     handler.domain_right_edge = domain_right_edge
-    handler.refine_by = 2
+    handler.refine_by = refine_by
     handler.dimensionality = 3
     handler.domain_dimensions = domain_dimensions
     handler.simulation_time = sim_time

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/frontends/stream/tests/test_stream_amrgrids.py
--- a/yt/frontends/stream/tests/test_stream_amrgrids.py
+++ b/yt/frontends/stream/tests/test_stream_amrgrids.py
@@ -26,3 +26,25 @@
         p = ProjectionPlot(spf, 'x', ["density"], center='c', origin='native')
         return p
     yield assert_raises, YTIntDomainOverflow, make_proj
+
+def test_refine_by():
+    grid_data = []
+    ref_by = 4
+    lo = 0.0
+    hi = 1.0
+    fine_grid_width = (hi - lo) / ref_by
+    for level in range(2):
+        grid_dict = {}
+
+        grid_dict['left_edge'] = [0.0 + 0.5*fine_grid_width*level]*3
+        grid_dict['right_edge'] = [1.0 - 0.5*fine_grid_width*level]*3
+        grid_dict['dimensions'] = [8, 8, 8]
+        grid_dict['level'] = level
+
+        grid_dict['density'] = np.ones((8,8,8))
+
+        grid_data.append(grid_dict)
+
+    domain_dimensions = np.array([8, 8, 8])
+
+    spf = load_amr_grids(grid_data, domain_dimensions, refine_by=ref_by)

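For context, a minimal standalone sketch of the new refine_by keyword, modeled on the test above; the two-level grid layout, the 8**3/16**3 dimensions, and the constant density field are arbitrary illustration values, not part of the changeset:

    import numpy as np
    import yt

    # Two properly nested grids with a refinement ratio of 4 instead of the default 2.
    grid_data = [
        dict(left_edge=[0.0, 0.0, 0.0], right_edge=[1.0, 1.0, 1.0],
             level=0, dimensions=[8, 8, 8], density=np.ones((8, 8, 8))),
        dict(left_edge=[0.0, 0.0, 0.0], right_edge=[0.5, 0.5, 0.5],
             level=1, dimensions=[16, 16, 16], density=np.ones((16, 16, 16))),
    ]
    ds = yt.load_amr_grids(grid_data, np.array([8, 8, 8]), refine_by=4)
    print ds.refine_by  # 4
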
diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 import time, types, signal, inspect, traceback, sys, pdb, os, re
-import time, types, signal, inspect, traceback, sys, pdb, os, re
 import contextlib
 import warnings, struct, subprocess
 import numpy as np
@@ -748,9 +747,10 @@
             SyntaxWarning, stacklevel=2)
         return cls(*args, **kwargs)
     return _func
-    
+
 def enable_plugins():
     import yt
+    from yt.fields.my_plugin_fields import my_plugins_fields
     from yt.config import ytcfg
     my_plugin_name = ytcfg.get("yt","pluginfilename")
     # We assume that it is with respect to the $HOME/.yt directory
@@ -760,7 +760,9 @@
         _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
     if os.path.isfile(_fn):
         mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn, yt.__dict__)
+        execdict = yt.__dict__.copy()
+        execdict['add_field'] = my_plugins_fields.add_field
+        execfile(_fn, execdict)
 
 def fix_unitary(u):
     if u == '1':

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/units/unit_symbols.py
--- a/yt/units/unit_symbols.py
+++ b/yt/units/unit_symbols.py
@@ -85,7 +85,7 @@
 # Kelvin
 #
 
-degree_kelvin = Kelvin = quan(1.0, "K")
+degree_kelvin = Kelvin = K = quan(1.0, "K")
 
 #
 # Misc CGS

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -217,7 +217,8 @@
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
           int points_per_color=1,
-          int thick=1):
+          int thick=1,
+	  int flip=0):
 
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
@@ -256,18 +257,23 @@
             if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
                 for xi in range(x0-thick/2, x0+(1+thick)/2):
                     for yi in range(y0-thick/2, y0+(1+thick)/2):
+                        if flip: 
+                            yi0 = ny - yi
+                        else:
+                            yi0 = yi
+
                         if has_alpha:
-                            image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
+                            image[xi, yi0, 3] = outa = alpha[3] + image[xi, yi0, 3]*(1-alpha[3])
                             if outa != 0.0:
                                 outa = 1.0/outa
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3]
+                                image[xi, yi0, i] = \
+                                        ((1.-alpha[3])*image[xi, yi0, i]*image[xi, yi0, 3]
                                          + alpha[3]*alpha[i])*outa
                         else:
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        (1.-alpha[i])*image[xi,yi,i] + alpha[i]
+                                image[xi, yi0, i] = \
+                                        (1.-alpha[i])*image[xi,yi0,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -109,12 +109,16 @@
             cax.set_position(caxrect)
             self.cax = cax
 
-    def _init_image(self, data, cbnorm, cmap, extent, aspect):
+    def _init_image(self, data, cbnorm, cblinthresh, cmap, extent, aspect):
         """Store output of imshow in image variable"""
         if (cbnorm == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (cbnorm == 'linear'):
             norm = matplotlib.colors.Normalize()
+        elif (cbnorm == 'symlog'):
+            if cblinthresh is None:
+                cblinthresh = (data.max()-data.min())/10.
+            norm = matplotlib.colors.SymLogNorm(cblinthresh,vmin=data.min(), vmax=data.max())
         extent = [float(e) for e in extent]
         if isinstance(cmap, tuple):
             if has_brewer:
@@ -126,7 +130,16 @@
         self.image = self.axes.imshow(data.to_ndarray(), origin='lower',
                                       extent=extent, norm=norm, vmin=self.zmin,
                                       aspect=aspect, vmax=self.zmax, cmap=cmap)
-        self.cb = self.figure.colorbar(self.image, self.cax)
+        if (cbnorm == 'symlog'):
+            formatter = matplotlib.ticker.LogFormatterMathtext()
+            self.cb = self.figure.colorbar(self.image, self.cax, format=formatter)
+            yticks = list(-10**np.arange(np.floor(np.log10(-data.min())),\
+                          np.rint(np.log10(cblinthresh))-1, -1)) + [0] + \
+                     list(10**np.arange(np.rint(np.log10(cblinthresh)),\
+                          np.ceil(np.log10(data.max()))+1))
+            self.cb.set_ticks(yticks)
+        else:
+            self.cb = self.figure.colorbar(self.image, self.cax)
 
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)

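For reference, a standalone matplotlib sketch of the symlog normalization wired in above; the sample data and the linthresh heuristic (one tenth of the data range, as in _init_image) are purely illustrative:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import SymLogNorm

    # Data that spans zero, which a plain LogNorm cannot handle.
    data = np.linspace(-1.0, 1.0, 256).reshape(16, 16)**3
    cblinthresh = (data.max() - data.min()) / 10.0   # same fallback as _init_image
    norm = SymLogNorm(cblinthresh, vmin=data.min(), vmax=data.max())

    fig, ax = plt.subplots()
    im = ax.imshow(data, origin='lower', norm=norm)
    fig.colorbar(im)
    fig.savefig("symlog_sketch.png")
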
diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -80,6 +80,53 @@
         return args[0]
     return newfunc
 
+def get_log_minorticks(vmin, vmax):
+    """calculate positions of linear minorticks on a log colorbar
+
+    Parameters
+    ----------
+    vmin : float
+        the minimum value in the colorbar
+    vmax : float
+        the maximum value in the colorbar
+
+    """
+    expA = np.floor(np.log10(vmin))
+    expB = np.floor(np.log10(vmax))
+    cofA = np.ceil(vmin/10**expA)
+    cofB = np.floor(vmax/10**expB)
+    lmticks = []
+    while cofA*10**expA <= cofB*10**expB:
+        if expA < expB:
+            lmticks = np.hstack( (lmticks, np.linspace(cofA, 9, 10-cofA)*10**expA) )
+            cofA = 1
+            expA += 1
+        else:
+            lmticks = np.hstack( (lmticks, np.linspace(cofA, cofB, cofB-cofA+1)*10**expA) )
+            expA += 1
+    return np.array(lmticks)
+
+def get_symlog_minorticks(linthresh, vmin, vmax):
+    """calculate positions of linear minorticks on a symmetric log colorbar
+
+    Parameters
+    ----------
+    linthresh: float
+        the threshold for the linear region
+    vmin : float
+        the minimum value in the colorbar
+    vmax : float
+        the maximum value in the colorbar
+
+    """
+    if vmin >= 0 or vmax <= 0:
+        raise RuntimeError(
+            '''attempting to set minorticks for
+              a symlog plot with one-sided data:
+              got vmin = %s, vmax = %s''' % (vmin, vmax))
+    return np.hstack( (-get_log_minorticks(linthresh,-vmin)[::-1], 0,
+                        get_log_minorticks(linthresh, vmax)) )
+
 field_transforms = {}
 
 
@@ -102,6 +149,7 @@
 
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
+symlog_transform = FieldTransform('symlog', None, LogLocator())
 
 class PlotDictionary(defaultdict):
     def __getitem__(self, item):
@@ -143,11 +191,13 @@
         self._font_color = None
         self._xlabel = None
         self._ylabel = None
+        self._minorticks = {}
+        self._cbar_minorticks = {}
         self._colorbar_label = PlotDictionary(
             self.data_source, lambda: None)
 
     @invalidate_plot
-    def set_log(self, field, log):
+    def set_log(self, field, log, linthresh=None):
         """set a field to log or linear.
 
         Parameters
@@ -156,6 +206,8 @@
             the field to set a transform
         log : boolean
             Log on/off.
+        linthresh : float (must be positive)
+            linthresh will be enabled for symlog scale only when log is true
 
         """
         if field == 'all':
@@ -164,7 +216,13 @@
             fields = [field]
         for field in self.data_source._determine_fields(fields):
             if log:
-                self._field_transform[field] = log_transform
+                if linthresh is not None:
+                    if not linthresh > 0.: 
+                        raise ValueError('\"linthresh\" must be positive')
+                    self._field_transform[field] = symlog_transform
+                    self._field_transform[field].func = linthresh 
+                else:
+                    self._field_transform[field] = log_transform
             else:
                 self._field_transform[field] = linear_transform
         return self
@@ -271,6 +329,58 @@
             self.plots[field].zmax = myzmax
         return self
 
+    @invalidate_plot
+    def set_minorticks(self, field, state):
+        """turn minor ticks on or off in the current plot
+
+        Displaying minor ticks reduces performance; turn them off
+        using set_minorticks('all', 'off') if drawing speed is a problem.
+
+        Parameters
+        ----------
+        field : string
+            the field to remove minorticks
+        state : string
+            the state indicating 'on' or 'off'
+
+        """
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in self.data_source._determine_fields(fields):
+            if state == 'on':
+                self._minorticks[field] = True
+            else:
+                self._minorticks[field] = False
+        return self
+
+    @invalidate_plot
+    def set_cbar_minorticks(self, field, state):
+        """turn colorbar minor ticks on or off in the current plot
+
+        Displaying minor ticks reduces performance; turn them off 
+        using set_cbar_minorticks('all', 'off') if drawing speed is a problem.
+
+        Parameters
+        ----------
+        field : string
+            the field to remove colorbar minorticks
+        state : string
+            the state indicating 'on' or 'off'
+
+        """
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in self.data_source._determine_fields(fields):
+            if state == 'on':
+                self._cbar_minorticks[field] = True
+            else:
+                self._cbar_minorticks[field] = False
+        return self
+
     def setup_callbacks(self):
         # Left blank to be overriden in subclasses
         pass

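Taken together, the additions to ImagePlotContainer could be exercised from any plot class that inherits it, e.g. a SlicePlot; in the sketch below the dataset path, field choice, and linthresh value are placeholders:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, "z", "velocity_x")

    # Keep log scaling, but use symlog with an explicit (positive) linear
    # threshold around zero instead of plain log10.
    slc.set_log("velocity_x", True, linthresh=1.0e5)

    # Minor ticks on the axes and on the colorbar can now be toggled per
    # field, or for "all" fields at once.
    slc.set_minorticks("all", "on")
    slc.set_cbar_minorticks("velocity_x", "off")

    slc.save()
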
diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -33,7 +33,9 @@
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .plot_container import \
-    ImagePlotContainer, log_transform, linear_transform, \
+    ImagePlotContainer, \
+    log_transform, linear_transform, symlog_transform, \
+    get_log_minorticks, get_symlog_minorticks, \
     invalidate_data, invalidate_plot, apply_callback
 
 from yt.data_objects.time_series import \
@@ -711,9 +713,9 @@
                 zlim = (None, None)
 
             image = self.frb[f]
-
             if self._field_transform[f] == log_transform:
                 msg = None
+                use_symlog = False
                 if zlim != (None, None):
                     pass
                 elif np.nanmax(image) == np.nanmin(image):
@@ -724,10 +726,21 @@
                           "values.  Max = %f." % (f, np.nanmax(image))
                 elif not np.any(np.isfinite(image)):
                     msg = "Plot image for field %s is filled with NaNs." % (f,)
+                elif np.nanmax(image) > 0. and np.nanmin(image) < 0:
+                    msg = "Plot image for field %s has both positive "\
+                          "and negative values. Min = %f, Max = %f."\
+                          % (f, np.nanmin(image), np.nanmax(image))
+                    use_symlog = True
                 if msg is not None:
                     mylog.warning(msg)
-                    mylog.warning("Switching to linear colorbar scaling.")
-                    self._field_transform[f] = linear_transform
+                    if use_symlog:
+                        mylog.warning("Switching to symlog colorbar scaling "\
+                                      "unless linear scaling is specified later")
+                        self._field_transform[f] = symlog_transform
+                        self._field_transform[f].func = None
+                    else:
+                        mylog.warning("Switching to linear colorbar scaling.")
+                        self._field_transform[f] = linear_transform
 
             fp = self._font_properties
 
@@ -746,6 +759,7 @@
 
             self.plots[f] = WindowPlotMPL(
                 image, self._field_transform[f].name,
+                self._field_transform[f].func,
                 self._colormaps[f], extent, zlim,
                 self.figure_size, fp.get_size(),
                 self.aspect, fig, axes, cax)
@@ -860,6 +874,32 @@
                            self.plots[f].cb.ax.axes.yaxis.get_offset_text()]):
                 label.set_fontproperties(fp)
 
+            # x-y axes minorticks
+            if f not in self._minorticks:
+                self._minorticks[f] = True
+            if self._minorticks[f] is True:
+                self.plots[f].axes.minorticks_on()
+            else:
+                self.plots[f].axes.minorticks_off()
+
+            # colorbar minorticks
+            if f not in self._cbar_minorticks:
+                self._cbar_minorticks[f] = True
+            if self._cbar_minorticks[f] is True:
+                if self._field_transform[f] == linear_transform:
+                    self.plots[f].cax.minorticks_on()
+                else:
+                    vmin = np.float64( self.plots[f].cb.norm.vmin )
+                    vmax = np.float64( self.plots[f].cb.norm.vmax )
+                    if self._field_transform[f] == log_transform:
+                        mticks = self.plots[f].image.norm( get_log_minorticks(vmin, vmax) )
+                    else: # symlog_transform
+                        flinthresh = 10**np.floor( np.log10( self.plots[f].cb.norm.linthresh ) )
+                        mticks = self.plots[f].image.norm( get_symlog_minorticks(flinthresh, vmin, vmax) )
+                    self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)
+            else:
+                self.plots[f].cax.minorticks_off()
+
             self.run_callbacks(f)
 
             if draw_axes is False:
@@ -1643,7 +1683,7 @@
 
 class WindowPlotMPL(ImagePlotMPL):
     """A container for a single PlotWindow matplotlib figure and axes"""
-    def __init__(self, data, cbname, cmap, extent, zlim, figure_size, fontsize,
+    def __init__(self, data, cbname, cblinthresh, cmap, extent, zlim, figure_size, fontsize,
                  unit_aspect, figure, axes, cax):
         self._draw_colorbar = True
         self._draw_axes = True
@@ -1669,7 +1709,7 @@
         super(WindowPlotMPL, self).__init__(
             size, axrect, caxrect, zlim, figure, axes, cax)
 
-        self._init_image(data, cbname, cmap, extent, unit_aspect)
+        self._init_image(data, cbname, cblinthresh, cmap, extent, unit_aspect)
 
         self.image.axes.ticklabel_format(scilimits=(-2, 3))
         if cbname == 'linear':

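In practice this means a log-scaled field that takes both signs now warns and falls back to a symlog colorbar instead of a linear one; a brief sketch, with the dataset path again a placeholder:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # velocity_x has both positive and negative values, so the default log
    # scaling emits a warning and switches the colorbar to symlog automatically;
    # linear scaling can still be forced afterwards with set_log(field, False).
    slc = yt.SlicePlot(ds, "z", "velocity_x")
    slc.save()
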
diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -29,7 +29,7 @@
 from .base_plot_types import ImagePlotMPL
 from .plot_container import \
     ImagePlotContainer, \
-    log_transform, linear_transform
+    log_transform, linear_transform, get_log_minorticks
 from yt.data_objects.profiles import \
     create_profile
 from yt.utilities.exceptions import \
@@ -848,6 +848,29 @@
                 label.set_fontproperties(fp)
                 if self._font_color is not None:
                     label.set_color(self._font_color)
+
+            # x-y axes minorticks
+            if f not in self._minorticks:
+                self._minorticks[f] = True
+            if self._minorticks[f] is True:
+                self.plots[f].axes.minorticks_on()
+            else:
+                self.plots[f].axes.minorticks_off()
+
+            # colorbar minorticks
+            if f not in self._cbar_minorticks:
+                self._cbar_minorticks[f] = True
+            if self._cbar_minorticks[f] is True:
+                if self._field_transform[f] == linear_transform:
+                    self.plots[f].cax.minorticks_on()
+                else:
+                    vmin = np.float64( self.plots[f].cb.norm.vmin )
+                    vmax = np.float64( self.plots[f].cb.norm.vmax )
+                    mticks = self.plots[f].image.norm( get_log_minorticks(vmin, vmax) )
+                    self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)
+            else:
+                self.plots[f].cax.minorticks_off()
+
         self._plot_valid = True
 
     @classmethod

diff -r 3ae60b97281378dba58b60415a6924d586ded905 -r ffce974e84c91a736575262dede7d78a4cd3a250 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -316,8 +316,11 @@
         nim = im.rescale(inline=False)
         enhance_rgba(nim)
         nim.add_background_color('black', inline=True)
-       
-        lines(nim, px, py, colors, 24)
+
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(nim, px, py, colors, 24, flip=1)
+
         return nim
 
     def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
@@ -370,11 +373,13 @@
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
 
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
-                  np.array([color, color]), 1, thickness)
+                  np.array([color, color]), 1, thickness, flip=1)
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -415,7 +420,10 @@
         py1 = int(self.resolution[0]*(dx1/self.width[0]))
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
-        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]))
+
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]),flip=1)
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -497,7 +505,9 @@
 
         px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
        
-        lines(im, px, py, color.reshape(1,4), 24)
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, px, py, color.reshape(1,4), 24, flip=1)
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
@@ -748,7 +758,7 @@
                                         image, sampler),
                            info=self.get_information())
 
-        # flip it up/down to handle how the png orientation is donetest.png
+        # flip it up/down to handle how the png orientation is done
         image = image[:,::-1,:]
         self.save_image(image, fn=fn, clip_ratio=clip_ratio, 
                        transparent=transparent)


https://bitbucket.org/yt_analysis/yt/commits/6ac1445dedad/
Changeset:   6ac1445dedad
Branch:      yt
User:        jzuhone
Date:        2014-10-28 14:01:19+00:00
Summary:     Adding a docstring to mazzotta_weighting
Affected #:  1 file

diff -r ffce974e84c91a736575262dede7d78a4cd3a250 -r 6ac1445dedadeae6c19b0a3f30f8c7834df600c1 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -103,6 +103,8 @@
                        units="") # add correct units here
 
     def _mazzotta_weighting(field, data):
+        # Spectroscopic-like weighting field for galaxy clusters
+        # Only useful as a weight_field for temperature, metallicity, velocity
         return data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
 
     registry.add_field((ftype,"mazzotta_weighting"),

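A sketch of how this weighting field might be used; the dataset path is a placeholder and the ("gas", "mazzotta_weighting") tuple assumes the default field type:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder dataset

    # Spectroscopic-like ("Mazzotta") weighting for a projected temperature map.
    prj = yt.ProjectionPlot(ds, "z", ("gas", "temperature"),
                            weight_field=("gas", "mazzotta_weighting"))
    prj.save()
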

https://bitbucket.org/yt_analysis/yt/commits/d6a924747694/
Changeset:   d6a924747694
Branch:      yt
User:        jzuhone
Date:        2014-10-28 16:18:13+00:00
Summary:     Speedups
Affected #:  2 files

diff -r 6ac1445dedadeae6c19b0a3f30f8c7834df600c1 -r d6a924747694e1fd3399feb85cd363fa49c3e893 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -215,7 +215,12 @@
 }
 
 cgs_conversions = {
-    "C":("mks","esu"),
-    "T":("mks","gauss"),
-    "A":("mks","statA"),
-}
\ No newline at end of file
+    "C":"esu",
+    "T":"gauss",
+    "A":"statA",
+}
+
+for conv in cgs_conversions.keys():
+    if conv in prefixable_units:
+        for p in unit_prefixes:
+            cgs_conversions[p+conv] = p+cgs_conversions[conv]

diff -r 6ac1445dedadeae6c19b0a3f30f8c7834df600c1 -r d6a924747694e1fd3399feb85cd363fa49c3e893 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry", "cgs_conversion", "is_mks"]
+                 "registry", "cgs_conversion"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -208,26 +208,17 @@
         obj.dimensions = dimensions
         obj.registry = registry
 
-        conversions = []
-        is_mks = []
-        for atom in unit_expr.atoms():
-            ua = str(atom)
-            possible_prefix = ua[0]
-            symbol_wo_prefix = ua[1:]
-            if possible_prefix in unit_prefixes and symbol_wo_prefix in prefixable_units:
-                ua = symbol_wo_prefix
-            if ua in cgs_conversions:
-                unit_sys, conv = cgs_conversions[ua]
-                is_mks.append(unit_sys == "mks")
-                conversions.append((atom, symbols(conv)))
-        if len(conversions) > 0:
+        check_atoms = [atom for atom in unit_expr.free_symbols
+                       if str(atom) in cgs_conversions]
+        if len(check_atoms) > 0:
+            conversions = [(atom, symbols(cgs_conversions[str(atom)]))
+                           for atom in check_atoms]
             conversion = unit_expr.subs(conversions)
             conversion = Unit(unit_expr=conversion, cgs_value=1.0,
                                dimensions=None, registry=registry)
         else:
             conversion = None
         obj.cgs_conversion = conversion
-        obj.is_mks = all(is_mks)
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -414,7 +405,7 @@
         Create and return dimensionally-equivalent mks units.
 
         """
-        if self.cgs_conversion and not self.is_mks:
+        if self.cgs_conversion:
             units = self.cgs_conversion
         else:
             units = self


https://bitbucket.org/yt_analysis/yt/commits/6dbf1ec9ce52/
Changeset:   6dbf1ec9ce52
Branch:      yt
User:        jzuhone
Date:        2014-10-28 16:23:46+00:00
Summary:     Adding tests for prefixed units
Affected #:  1 file

diff -r d6a924747694e1fd3399feb85cd363fa49c3e893 -r 6dbf1ec9ce526119cc0ba9d77aaafd9512f7cc60 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -864,6 +864,10 @@
     qp_mks = qp.in_units("C")
     yield assert_equal, qp_mks.units.dimensions, current_mks*time
     yield assert_array_almost_equal, qp_mks.in_cgs(), qp
+    qp_mks_k = qp_mks.in_units("kC")
+    yield assert_equal, qp_mks.units.dimensions, current_mks*time
+    yield assert_array_almost_equal, qp_mks_k.in_cgs(), qp
+    yield assert_array_almost_equal, qp_mks_k.in_units("kesu"), qp.in_units("kesu")
 
     K = 1.0/(4*np.pi*eps_0)
     yield assert_array_almost_equal, K.in_cgs(), 1.0

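Based on the test above, prefixed electromagnetic units now round-trip between the two systems; a quick sketch:

    from yt.units.yt_array import YTQuantity

    q = YTQuantity(1.0, "kC")      # one kilocoulomb
    print q.in_cgs()               # converts through the prefixed "kesu" mapping
    print q.in_units("kesu")       # prefixed electrostatic units work directly
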

https://bitbucket.org/yt_analysis/yt/commits/7cbabd9d4259/
Changeset:   7cbabd9d4259
Branch:      yt
User:        jzuhone
Date:        2014-10-28 18:18:42+00:00
Summary:     Photon simulator bugfix
Affected #:  1 file

diff -r 6dbf1ec9ce526119cc0ba9d77aaafd9512f7cc60 -r 7cbabd9d42596ede7f4cbef87450d362c3cb84e3 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -39,7 +39,7 @@
         self.emax = emax*units.keV
         self.nchan = nchan
         self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
-        self.de = np.diff(self.ebins)*units.keV
+        self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
     def prepare(self):


https://bitbucket.org/yt_analysis/yt/commits/ace74ecc7236/
Changeset:   ace74ecc7236
Branch:      yt
User:        jzuhone
Date:        2014-10-29 00:05:16+00:00
Summary:     is_mks should not have gone away -- it's needed to ensure that units which are already mks electrostatic units convert to mks properly.
Affected #:  1 file

diff -r 7cbabd9d42596ede7f4cbef87450d362c3cb84e3 -r ace74ecc7236a14a4f1f9dbca2ae3cb44df4e090 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry", "cgs_conversion"]
+                 "registry", "cgs_conversion", "is_mks"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -211,14 +211,18 @@
         check_atoms = [atom for atom in unit_expr.free_symbols
                        if str(atom) in cgs_conversions]
         if len(check_atoms) > 0:
-            conversions = [(atom, symbols(cgs_conversions[str(atom)]))
-                           for atom in check_atoms]
+            conversions = []
+            for atom in check_atoms:
+                conversions.append((atom,symbols(cgs_conversions[str(atom)])))
             conversion = unit_expr.subs(conversions)
             conversion = Unit(unit_expr=conversion, cgs_value=1.0,
                                dimensions=None, registry=registry)
+            is_mks = True
         else:
             conversion = None
+            is_mks = False
         obj.cgs_conversion = conversion
+        obj.is_mks = is_mks
 
         if unit_key:
             registry.unit_objs[unit_key] = obj
@@ -405,7 +409,7 @@
         Create and return dimensionally-equivalent mks units.
 
         """
-        if self.cgs_conversion:
+        if self.cgs_conversion and not self.is_mks:
             units = self.cgs_conversion
         else:
             units = self


https://bitbucket.org/yt_analysis/yt/commits/9138f1e015e8/
Changeset:   9138f1e015e8
Branch:      yt
User:        jzuhone
Date:        2014-10-29 04:00:34+00:00
Summary:     Effective temperature equivalence
Affected #:  2 files

diff -r ace74ecc7236a14a4f1f9dbca2ae3cb44df4e090 -r 9138f1e015e88df57929552ebf1c7a3a54ee0033 yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -13,7 +13,7 @@
 
 import yt.utilities.physical_constants as pc
 from yt.units.dimensions import temperature, mass, energy, length, rate, \
-    velocity, dimensionless, density, number_density
+    velocity, dimensionless, density, number_density, flux
 from yt.extern.six import add_metaclass
 import numpy as np
 
@@ -39,6 +39,9 @@
         elif new_dims == density:
             return x*mu*pc.mh
 
+    def __str__(self):
+        return "number density: density <-> number density"
+
 class ThermalEquivalence(Equivalence):
     _type_name = "thermal"
     dims = (temperature,energy,)
@@ -148,3 +151,16 @@
     def __str__(self):
         return "compton: mass <-> length"
 
+class EffectiveTemperature(Equivalence):
+    _type_name = "effective_temperature"
+    dims = (flux,temperature,)
+
+    def convert(self, x, new_dims):
+        if new_dims == flux:
+            return pc.stefan_boltzmann_constant_cgs*x**4
+        elif new_dims == temperature:
+            return (x/pc.stefan_boltzmann_constant_cgs)**0.25
+
+    def __str__(self):
+        return "effective_temperature: flux <-> temperature"
+

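A quick sketch of the new equivalence, mirroring the test added in this changeset; the temperature value is arbitrary:

    from yt.units.yt_array import YTQuantity

    T = YTQuantity(5778.0, "K")    # arbitrary example temperature
    F = T.to_equivalent("erg/s/cm**2", "effective_temperature")   # sigma*T**4
    print F
    print F.to_equivalent("K", "effective_temperature")           # back to T
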
diff -r ace74ecc7236a14a4f1f9dbca2ae3cb44df4e090 -r 9138f1e015e88df57929552ebf1c7a3a54ee0033 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -28,7 +28,8 @@
     assert_array_equal, \
     assert_equal, assert_raises, \
     assert_array_almost_equal_nulp, \
-    assert_array_almost_equal
+    assert_array_almost_equal, \
+    assert_allclose
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -880,20 +881,21 @@
 
 def test_equivalencies():
     from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh, me, \
-        mass_sun_cgs, G
+        mass_sun_cgs, G, stefan_boltzmann_constant_cgs
+    import yt.units as u
 
     # Mass-energy
 
     E = mp.to_equivalent("keV","mass_energy")
     yield assert_equal, E, mp*clight*clight
-    yield assert_array_almost_equal, mp, E.to_equivalent("g", "mass_energy")
+    yield assert_allclose, mp, E.to_equivalent("g", "mass_energy")
 
     # Thermal
 
     T = YTQuantity(1.0e8,"K")
     E = T.to_equivalent("W*hr","thermal")
     yield assert_equal, E, (kboltz*T).in_units("W*hr")
-    yield assert_array_almost_equal, T, E.to_equivalent("K", "thermal")
+    yield assert_allclose, T, E.to_equivalent("K", "thermal")
 
     # Spectral
 
@@ -902,11 +904,11 @@
     yield assert_equal, nu, clight/l
     E = hcgs*nu
     l2 = E.to_equivalent("angstrom", "spectral")
-    yield assert_array_almost_equal, l, l2
+    yield assert_allclose, l, l2
     nu2 = clight/l2.in_units("cm")
-    yield assert_array_almost_equal, nu, nu2
+    yield assert_allclose, nu, nu2
     E2 = nu2.to_equivalent("keV", "spectral")
-    yield assert_array_almost_equal, E2, E.in_units("keV")
+    yield assert_allclose, E2, E.in_units("keV")
 
     # Sound-speed
 
@@ -914,32 +916,52 @@
     gg = 5./3.
     c_s = T.to_equivalent("km/s","sound_speed")
     yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_array_almost_equal, T, c_s.to_equivalent("K","sound_speed")
+    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed")
 
     mu = 0.5
     gg = 4./3.
     c_s = T.to_equivalent("km/s","sound_speed", mu=mu, gamma=gg)
     yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_array_almost_equal, T, c_s.to_equivalent("K","sound_speed",
-                                                          mu=mu, gamma=gg)
+    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed",
+                                                    mu=mu, gamma=gg)
 
     # Lorentz
 
     v = 0.8*clight
     g = v.to_equivalent("dimensionless","lorentz")
     g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
-    yield assert_array_almost_equal, g, g2
+    yield assert_allclose, g, g2
     v2 = g2.to_equivalent("mile/hr", "lorentz")
-    yield assert_array_almost_equal, v2, v.in_units("mile/hr")
+    yield assert_allclose, v2, v.in_units("mile/hr")
 
     # Schwarzschild
 
     R = mass_sun_cgs.to_equivalent("kpc","schwarzschild")
     yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
-    yield assert_array_almost_equal, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
+    yield assert_allclose, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
 
     # Compton
 
     l = me.to_equivalent("angstrom","compton")
     yield assert_equal, l, hcgs/(me*clight)
-    yield assert_array_almost_equal, mp, l.to_equivalent("g", "compton")
+    yield assert_allclose, me, l.to_equivalent("g", "compton")
+
+    # Number density
+
+    rho = mp/u.cm**3
+
+    n = rho.to_equivalent("cm**-3","number_density")
+    yield assert_equal, n, rho/(mh*0.6)
+    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density")
+
+    n = rho.to_equivalent("cm**-3","number_density", mu=0.75)
+    yield assert_equal, n, rho/(mh*0.75)
+    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density", mu=0.75)
+
+    # Effective temperature
+
+    T = YTQuantity(1.0e4, "K")
+    F = T.to_equivalent("erg/s/cm**2","effective_temperature")
+    yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
+    yield assert_allclose, T, F.to_equivalent("K", "effective_temperature")
+


https://bitbucket.org/yt_analysis/yt/commits/baef8f431468/
Changeset:   baef8f431468
Branch:      yt
User:        jzuhone
Date:        2014-10-29 04:01:06+00:00
Summary:     Updating docs, making clear limitations of certain equivalencies
Affected #:  1 file

diff -r 9138f1e015e88df57929552ebf1c7a3a54ee0033 -r baef8f431468e6fbb1b2d3c5c22c1464d6ecfbda doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:540508ad9cef9e81722ee4a6d7ed148df117fde2467b22dcf856cb6a4c01b00f"
+  "signature": "sha256:cee652d703dd3369d81ebc670882d3734f73d0274aab98823a784d8039355480"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -17,11 +17,15 @@
       "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
       "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
       "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
-      "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
       "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
       "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
       "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n",
       "\n",
+      "The following unit equivalencies only apply under conditions applicable for an ideal gas with a constant mean molecular weight $\\mu$ and ratio of specific heats $\\gamma$:\n",
+      "\n",
+      "* `\"number_density\"`: conversions between density and number density ($n = \\rho/\\mu{m_p}$)\n",
+      "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
+      "\n",
       "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
      ]
     },
@@ -70,17 +74,27 @@
      "outputs": []
     },
     {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Special Equivalencies"
+     ]
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Since the sound speed for an ideal gas also depends on the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$, these may be optionally supplied to the `to_equivalent` call (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
+      "Some equivalencies can take supplemental information. The `\"number_density\"` equivalence can take a custom mean molecular weight (default is $\\mu = 0.6$):"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5)"
+      "print dd[\"density\"].max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\").max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\", mu=0.75).max()"
      ],
      "language": "python",
      "metadata": {},
@@ -90,6 +104,39 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "The `\"sound_speed\"` equivalence optionally takes the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$ (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\").mean()\n",
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5).mean()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "These options must be used with caution, and only if you know the underlying data adheres to these assumptions!"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Determining Valid Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
      ]
     },


https://bitbucket.org/yt_analysis/yt/commits/7953b98ce611/
Changeset:   7953b98ce611
Branch:      yt
User:        jzuhone
Date:        2014-10-29 14:18:55+00:00
Summary:     Merge
Affected #:  3 files

diff -r baef8f431468e6fbb1b2d3c5c22c1464d6ecfbda -r 7953b98ce611c9f58896f56f85440bd98e13aec5 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -98,7 +98,12 @@
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity, \
+    uconcatenate, \
+    uintersect1d, \
+    uunion1d
 
 from yt.fields.api import \
     field_plugins, \

diff -r baef8f431468e6fbb1b2d3c5c22c1464d6ecfbda -r 7953b98ce611c9f58896f56f85440bd98e13aec5 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -33,7 +33,9 @@
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
-    unary_operators, binary_operators
+    unary_operators, binary_operators, \
+    uconcatenate, uintersect1d, \
+    uunion1d
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_ds, requires_module
@@ -965,3 +967,21 @@
     yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
     yield assert_allclose, T, F.to_equivalent("K", "effective_temperature")
 
+
+def test_numpy_wrappers():
+    a1 = YTArray([1, 2, 3], 'cm')
+    a2 = YTArray([2, 3, 4, 5, 6], 'cm')
+    catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]
+    intersect_answer = [2, 3]
+    union_answer = [1, 2, 3, 4, 5, 6]
+
+    yield (assert_array_equal, YTArray(catenate_answer, 'cm'),
+           uconcatenate((a1, a2)))
+    yield assert_array_equal, catenate_answer, np.concatenate((a1, a2))
+
+    yield (assert_array_equal, YTArray(intersect_answer, 'cm'),
+           uintersect1d(a1, a2))
+    yield assert_array_equal, intersect_answer, np.intersect1d(a1, a2)
+
+    yield assert_array_equal, YTArray(union_answer, 'cm'), uunion1d(a1, a2)
+    yield assert_array_equal, union_answer, np.union1d(a1, a2)

diff -r baef8f431468e6fbb1b2d3c5c22c1464d6ecfbda -r 7953b98ce611c9f58896f56f85440bd98e13aec5 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -40,6 +40,8 @@
 from sympy import Rational
 from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
 
+NULL_UNIT = Unit()
+
 # redefine this here to avoid a circular import from yt.funcs
 def iterable(obj):
     try: len(obj)
@@ -85,13 +87,11 @@
     return None
 
 def arctan2_unit(unit1, unit2):
-    return Unit()
+    return NULL_UNIT
 
 def comparison_unit(unit1, unit2):
     return None
 
-NULL_UNIT = Unit()
-
 def coerce_iterable_units(input_object):
     if isinstance(input_object, np.ndarray):
         return input_object
@@ -341,7 +341,7 @@
         """
         if obj is None and hasattr(self, 'units'):
             return
-        self.units = getattr(obj, 'units', None)
+        self.units = getattr(obj, 'units', NULL_UNIT)
 
     def __repr__(self):
         """
@@ -988,7 +988,7 @@
         elif context[0] in unary_operators:
             u = getattr(context[1][0], 'units', None)
             if u is None:
-                u = Unit()
+                u = NULL_UNIT
             unit = self._ufunc_registry[context[0]](u)
             ret_class = type(self)
         elif context[0] in binary_operators:
@@ -1143,8 +1143,7 @@
     def __repr__(self):
         return str(self)
 
-def uconcatenate(arrs, *args, **kwargs):
-    v = np.concatenate(arrs, *args, **kwargs)
+def validate_numpy_wrapper_units(v, arrs):
     if not any(isinstance(a, YTArray) for a in arrs):
         return v
     if not all(isinstance(a, YTArray) for a in arrs):
@@ -1155,6 +1154,63 @@
     v.units = a1.units
     return v
 
+def uconcatenate(arrs, axis=0):
+    """Concatenate a sequence of arrays.
+
+    This wrapper around numpy.concatenate preserves units. All input arrays must
+    have the same units.  See the documentation of numpy.concatenate for full
+    details.
+
+    Examples
+    --------
+    >>> A = yt.YTArray([1, 2, 3], 'cm')
+    >>> B = yt.YTArray([2, 3, 4], 'cm')
+    >>> uconcatenate((A, B))
+    YTArray([ 1., 2., 3., 2., 3., 4.]) cm
+
+    """
+    v = np.concatenate(arrs, axis=axis)
+    v = validate_numpy_wrapper_units(v, arrs)
+    return v
+
+def uintersect1d(arr1, arr2, assume_unique=False):
+    """Find the sorted unique elements of the two input arrays.
+
+    A wrapper around numpy.intersect1d that preserves units.  All input arrays
+    must have the same units.  See the documentation of numpy.intersect1d for
+    full details.
+
+    Examples
+    --------
+    >>> A = yt.YTArray([1, 2, 3], 'cm')
+    >>> B = yt.YTArray([2, 3, 4], 'cm')
+    >>> uintersect1d(A, B)
+    YTArray([ 2., 3.]) cm
+
+    """
+    v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
+    v = validate_numpy_wrapper_units(v, [arr1, arr2])
+    return v
+
+def uunion1d(arr1, arr2):
+    """Find the union of two arrays.
+
+    A wrapper around numpy.intersect1d that preserves units.  All input arrays
+    must have the same units.  See the documentation of numpy.intersect1d for
+    full details.
+
+    Examples
+    --------
+    >>> A = yt.YTArray([1, 2, 3], 'cm')
+    >>> B = yt.YTArray([2, 3, 4], 'cm')
+    >>> uunion1d(A, B)
+    YTArray([ 1., 2., 3., 4.]) cm
+
+    """
+    v = np.union1d(arr1, arr2)
+    v = validate_numpy_wrapper_units(v, [arr1, arr2])
+    return v
+
 def array_like_field(data, x, field):
     field = data._determine_fields(field)[0]
     if isinstance(field, tuple):

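The docstrings above cover the intent; a compact sketch exercising the wrappers alongside a plain numpy call, based on the values used in test_numpy_wrappers:

    import numpy as np
    from yt.units.yt_array import YTArray, uconcatenate, uintersect1d, uunion1d

    a = YTArray([1, 2, 3], "cm")
    b = YTArray([2, 3, 4, 5, 6], "cm")

    print uconcatenate((a, b))   # YTArray([1, 2, 3, 2, 3, 4, 5, 6]) cm
    print uintersect1d(a, b)     # YTArray([2, 3]) cm
    print uunion1d(a, b)         # YTArray([1, 2, 3, 4, 5, 6]) cm
    print np.union1d(a, b)       # plain numpy; use the u* wrappers when units must survive
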

https://bitbucket.org/yt_analysis/yt/commits/1e0faa5cb0fc/
Changeset:   1e0faa5cb0fc
Branch:      yt
User:        MatthewTurk
Date:        2014-11-03 16:53:07+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1286)

Conversions for different electromagnetic unit systems, Unit equivalencies
Affected #:  16 files

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
+  "signature": "sha256:c7cfb2db456d127bb633b7eee7ad6fe14290aa622ac62694c7840d80137afaba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -36,7 +36,7 @@
      "input": [
       "import yt\n",
       "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
-      "          \n",
+      "\n",
       "dd = ds.all_data()\n",
       "maxval, maxloc = ds.find_max('density')\n",
       "\n",
@@ -222,6 +222,69 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Electrostatic/Electromagnetic Units"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Electromagnetic units can be a bit tricky, because the units for such quantities in different unit systems can have entirely different dimensions, even if they are meant to represent the same physical quantities. For example, in the SI system of units, current in Amperes is a fundamental unit of measure, so the unit of charge \"coulomb\" is equal to one ampere-second. On the other hand, in the Gaussian/CGS system, there is no equivalent base electromagnetic unit, and the electrostatic charge unit \"esu\" is equal to one $\\mathrm{cm^{3/2}g^{-1/2}s^{-1}}$ (which does not have any apparent physical significance). `yt` recognizes this difference:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "q1 = yt.YTArray(1.0,\"C\") # Coulombs\n",
+      "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
+      "\n",
+      "print \"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions\n",
+      "print \"units =\", q2.in_cgs().units, \", dims =\", q2.units.dimensions"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Under the hood, the `yt` units system has a translation layer that converts between these two systems, without any further effort required. For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import elementary_charge\n",
+      "\n",
+      "print elementary_charge\n",
+      "elementary_charge_C = elementary_charge.in_units(\"C\")\n",
+      "print elementary_charge_C"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The electromagnetic unit translations `yt` understands are:\n",
+      "\n",
+      "* Charge: 1 coulomb (C) $\\leftrightarrow$ 0.1c electrostatic unit (esu, Fr)\n",
+      "* Current: 1 ampere (A, C/s) $\\leftrightarrow$ 0.1c statampere (statA, esu/s, Fr) \n",
+      "* Magnetic Field: 1 tesla (T) $\\leftrightarrow 10^4$ gauss (G)\n",
+      "\n",
+      "where \"Fr\" is the franklin, an alternative name for the electrostatic unit, and c is the speed of light. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Working with views and converting to ndarray"
      ]
     },
@@ -324,10 +387,9 @@
      "collapsed": false,
      "input": [
       "from astropy import units as u\n",
-      "from yt import YTQuantity, YTArray\n",
       "\n",
       "x = 42.0 * u.meter\n",
-      "y = YTQuantity.from_astropy(x) "
+      "y = yt.YTQuantity.from_astropy(x) "
      ],
      "language": "python",
      "metadata": {},
@@ -349,7 +411,7 @@
      "collapsed": false,
      "input": [
       "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray.from_astropy(a)"
+      "b = yt.YTArray.from_astropy(a)"
      ],
      "language": "python",
      "metadata": {},
@@ -436,7 +498,7 @@
      "collapsed": false,
      "input": [
       "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity.from_astropy(kb)\n",
+      "k2 = yt.YTQuantity.from_astropy(kb)\n",
       "print k1 == k2"
      ],
      "language": "python",
@@ -447,7 +509,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "c = YTArray.from_astropy(a)\n",
+      "c = yt.YTArray.from_astropy(a)\n",
       "d = c.to_astropy()\n",
       "print a == d"
      ],

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -0,0 +1,179 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:cee652d703dd3369d81ebc670882d3734f73d0274aab98823a784d8039355480"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
+      "\n",
+      "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
+      "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
+      "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
+      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
+      "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
+      "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n",
+      "\n",
+      "The following unit equivalencies only apply under conditions applicable for an ideal gas with a constant mean molecular weight $\\mu$ and ratio of specific heats $\\gamma$:\n",
+      "\n",
+      "* `\"number_density\"`: conversions between density and number density ($n = \\rho/\\mu{m_p}$)\n",
+      "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
+      "\n",
+      "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "\n",
+      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
+      "\n",
+      "dd = ds.all_data()\n",
+      "\n",
+      "print dd[\"temperature\"].to_equivalent(\"erg\", \"thermal\")\n",
+      "print dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\")\n",
+      "\n",
+      "# Rest energy of the proton\n",
+      "from yt.utilities.physical_constants import mp\n",
+      "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
+      "print E_p"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies work in both directions; the only information required is the unit you want to convert to:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import clight\n",
+      "v = 0.1*clight\n",
+      "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
+      "print g\n",
+      "print g.to_equivalent(\"c\", \"lorentz\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Special Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some equivalencies can take supplemental information. The `\"number_density\"` equivalence can take a custom mean molecular weight (default is $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"density\"].max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\").max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\", mu=0.75).max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `\"sound_speed\"` equivalence optionally takes the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$ (defaults are $\\gamma = 5/3$, $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\").mean()\n",
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5).mean()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "These options must be used with caution, and only if you know the underlying data adheres to these assumptions!"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Determining Valid Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If a certain equivalence does not exist for a particular unit, an error will be raised:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
+      "\n",
+      "try:\n",
+      "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
+      "except YTInvalidUnitEquivalence as e:\n",
+      "    print e"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "E_p.list_equivalencies()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -33,6 +33,7 @@
    comoving_units_and_code_units
    comparing_units_from_different_datasets
    units_and_plotting
+   unit_equivalencies
 
 .. note::
 

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb doc/source/analyzing/units/unit_equivalencies.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_equivalencies.rst
@@ -0,0 +1,7 @@
+.. _unit_equivalencies:
+
+Unit Equivalencies
+==================
+
+.. notebook:: 6)_Unit_Equivalencies.ipynb
+   :skip_exceptions:

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -39,7 +39,7 @@
         self.emax = emax*units.keV
         self.nchan = nchan
         self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
-        self.de = np.diff(self.ebins)*units.keV
+        self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
     def prepare(self):
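
The change to self.de above drops a double application of units: np.diff of a
YTArray already carries the array's units, so multiplying by units.keV again
would have produced keV**2. A minimal check of that behavior, as a sketch:

import numpy as np
from yt import YTArray

ebins = YTArray(np.linspace(0.1, 10.0, 1001), "keV")
de = np.diff(ebins)   # element-wise differences, still a YTArray in keV
print de.units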

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -102,6 +102,15 @@
                        function=_xray_emissivity,
                        units="") # add correct units here
 
+    def _mazzotta_weighting(field, data):
+        # Spectroscopic-like weighting field for galaxy clusters
+        # Only useful as a weight_field for temperature, metallicity, velocity
+        return data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
+
+    registry.add_field((ftype,"mazzotta_weighting"),
+                       function=_mazzotta_weighting,
+                       units="keV**-0.25*cm**-6")
+    
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")
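
The new "mazzotta_weighting" field is meant to be used as a weight for
projected cluster quantities such as temperature. A hypothetical usage
sketch; the dataset path below is only a placeholder and not part of this
changeset:

import yt

ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # placeholder dataset
prj = yt.ProjectionPlot(ds, "z", "temperature",
                        weight_field="mazzotta_weighting")
prj.save()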

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -19,9 +19,11 @@
 time = Symbol("(time)", positive=True)
 temperature = Symbol("(temperature)", positive=True)
 angle = Symbol("(angle)", positive=True)
+current_mks = Symbol("(current_mks)", positive=True)
 dimensionless = sympify(1)
 
-base_dimensions = [mass, length, time, temperature, angle, dimensionless]
+base_dimensions = [mass, length, time, temperature, angle, current_mks,
+                   dimensionless]
 
 #
 # Derived dimensions
@@ -44,16 +46,31 @@
 power    = energy / time
 flux     = power / area
 specific_flux = flux / rate
-charge   = (energy * length)**Rational(1, 2)  # proper 1/2 power
+number_density = 1/(length*length*length)
+density = mass * number_density
 
-electric_field = charge / length**2
-magnetic_field = electric_field
+# Gaussian electromagnetic units
+charge_cgs  = (energy * length)**Rational(1, 2)  # proper 1/2 power
+current_cgs = charge_cgs / time
+electric_field_cgs = charge_cgs / length**2
+magnetic_field_cgs = electric_field_cgs
+
+# SI electromagnetic units
+charge_mks = current_mks * time
+electric_field_mks = force / charge_mks
+magnetic_field_mks = electric_field_mks / velocity
+
+# Since cgs is our default, I'm adding these aliases for backwards-compatibility
+charge = charge_cgs
+electric_field = electric_field_cgs
+magnetic_field = magnetic_field_cgs
 
 solid_angle = angle * angle
 
 derived_dimensions = [rate, velocity, acceleration, jerk, snap, crackle, pop, 
-                      momentum, force, energy, power, charge, electric_field, 
-                      magnetic_field, solid_angle, flux, specific_flux, volume,
-                      area]
+                      momentum, force, energy, power, charge_cgs, electric_field_cgs,
+                      magnetic_field_cgs, solid_angle, flux, specific_flux, volume,
+                      area, current_cgs, charge_mks, electric_field_mks,
+                      magnetic_field_mks]
 
 dimensions = base_dimensions + derived_dimensions
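
With the split above, Gaussian and SI electromagnetic quantities carry
different formal dimensions, while charge, electric_field, and magnetic_field
stay aliased to the cgs versions. A short sketch, assuming this branch:

import yt
from yt.units import dimensions

q_cgs = yt.YTQuantity(1.0, "esu")
q_mks = yt.YTQuantity(1.0, "C")

print q_cgs.units.dimensions   # Gaussian charge: sqrt(mass*length**3)/time
print q_mks.units.dimensions   # SI charge: (current_mks)*(time)
print dimensions.charge == dimensions.charge_cgs   # True; cgs remains the default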

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/equivalencies.py
--- /dev/null
+++ b/yt/units/equivalencies.py
@@ -0,0 +1,166 @@
+"""
+Equivalencies between different kinds of units
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import yt.utilities.physical_constants as pc
+from yt.units.dimensions import temperature, mass, energy, length, rate, \
+    velocity, dimensionless, density, number_density, flux
+from yt.extern.six import add_metaclass
+import numpy as np
+
+equivalence_registry = {}
+
+class RegisteredEquivalence(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            equivalence_registry[cls._type_name] = cls
+
+ at add_metaclass(RegisteredEquivalence)
+class Equivalence(object):
+    _skip_add = False
+
+class NumberDensityEquivalence(Equivalence):
+    _type_name = "number_density"
+    dims = (density,number_density,)
+
+    def convert(self, x, new_dims, mu=0.6):
+        if new_dims == number_density:
+            return x/(mu*pc.mh)
+        elif new_dims == density:
+            return x*mu*pc.mh
+
+    def __str__(self):
+        return "number density: density <-> number density"
+
+class ThermalEquivalence(Equivalence):
+    _type_name = "thermal"
+    dims = (temperature,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            return pc.kboltz*x
+        elif new_dims == temperature:
+            return x/pc.kboltz
+
+    def __str__(self):
+        return "thermal: temperature <-> energy"
+
+class MassEnergyEquivalence(Equivalence):
+    _type_name = "mass_energy"
+    dims = (mass,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            return x*pc.clight*pc.clight
+        elif new_dims == mass:
+            return x/(pc.clight*pc.clight)
+
+    def __str__(self):
+        return "mass_energy: mass <-> energy"
+
+class SpectralEquivalence(Equivalence):
+    _type_name = "spectral"
+    dims = (length,rate,energy,)
+
+    def convert(self, x, new_dims):
+        if new_dims == energy:
+            if x.units.dimensions == length:
+                nu = pc.clight/x
+            elif x.units.dimensions == rate:
+                nu = x
+            return pc.hcgs*nu
+        elif new_dims == length:
+            if x.units.dimensions == rate:
+                return pc.clight/x
+            elif x.units.dimensions == energy:
+                return pc.hcgs*pc.clight/x
+        elif new_dims == rate:
+            if x.units.dimensions == length:
+                return pc.clight/x
+            elif x.units.dimensions == energy:
+                return x/pc.hcgs
+
+    def __str__(self):
+        return "spectral: length <-> rate <-> energy"
+
+class SoundSpeedEquivalence(Equivalence):
+    _type_name = "sound_speed"
+    dims = (velocity,temperature,energy,)
+
+    def convert(self, x, new_dims, mu=0.6, gamma=5./3.):
+        if new_dims == velocity:
+            if x.units.dimensions == temperature:
+                kT = pc.kboltz*x
+            elif x.units.dimensions == energy:
+                kT = x
+            return np.sqrt(gamma*kT/(mu*pc.mh))
+        else:
+            kT = x*x*mu*pc.mh/gamma
+            if new_dims == temperature:
+                return kT/pc.kboltz
+            else:
+                return kT
+
+    def __str__(self):
+        return "sound_speed (ideal gas): velocity <-> temperature <-> energy"
+
+class LorentzEquivalence(Equivalence):
+    _type_name = "lorentz"
+    dims = (dimensionless,velocity,)
+
+    def convert(self, x, new_dims):
+        if new_dims == dimensionless:
+            beta = x.in_cgs()/pc.clight
+            return 1./np.sqrt(1.-beta**2)
+        elif new_dims == velocity:
+            return pc.clight*np.sqrt(1.-1./(x*x))
+
+    def __str__(self):
+        return "lorentz: velocity <-> dimensionless"
+
+class SchwarzschildEquivalence(Equivalence):
+    _type_name = "schwarzschild"
+    dims = (mass,length,)
+
+    def convert(self, x, new_dims):
+        if new_dims == length:
+            return 2.*pc.G*x/(pc.clight*pc.clight)
+        elif new_dims == mass:
+            return 0.5*x*pc.clight*pc.clight/pc.G
+
+    def __str__(self):
+        return "schwarzschild: mass <-> length"
+
+class ComptonEquivalence(Equivalence):
+    _type_name = "compton"
+    dims = (mass,length,)
+
+    def convert(self, x, new_dims):
+        return pc.hcgs/(x*pc.clight)
+
+    def __str__(self):
+        return "compton: mass <-> length"
+
+class EffectiveTemperature(Equivalence):
+    _type_name = "effective_temperature"
+    dims = (flux,temperature,)
+
+    def convert(self, x, new_dims):
+        if new_dims == flux:
+            return pc.stefan_boltzmann_constant_cgs*x**4
+        elif new_dims == temperature:
+            return (x/pc.stefan_boltzmann_constant_cgs)**0.25
+
+    def __str__(self):
+        return "effective_temperature: flux <-> temperature"
+
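
Because of the metaclass registry above, a new equivalence only needs a
_type_name and a dims tuple to become usable from to_equivalent. The
"gravitational_radius" class below is an illustrative sketch, not part of
this changeset:

import yt.utilities.physical_constants as pc
from yt.units.dimensions import mass, length
from yt.units.equivalencies import Equivalence, equivalence_registry

class GravitationalRadiusEquivalence(Equivalence):
    # R_g = GM/c**2, i.e. half the Schwarzschild radius
    _type_name = "gravitational_radius"
    dims = (mass,length,)

    def convert(self, x, new_dims):
        if new_dims == length:
            return pc.G*x/(pc.clight*pc.clight)
        elif new_dims == mass:
            return x*pc.clight*pc.clight/pc.G

    def __str__(self):
        return "gravitational_radius: mass <-> length"

print "gravitational_radius" in equivalence_registry   # True, registered on class creation
print pc.mass_sun_cgs.to_equivalent("km", "gravitational_radius")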

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -28,7 +28,8 @@
     assert_array_equal, \
     assert_equal, assert_raises, \
     assert_array_almost_equal_nulp, \
-    assert_array_almost_equal
+    assert_array_almost_equal, \
+    assert_allclose
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -471,21 +472,21 @@
 
     km = YTQuantity(1, 'km')
     balmy = YTQuantity(300, 'K')
-    balmy_F = YTQuantity(80.33, 'F')
-    balmy_C = YTQuantity(26.85, 'C')
+    balmy_F = YTQuantity(80.33, 'degF')
+    balmy_C = YTQuantity(26.85, 'degC')
     balmy_R = YTQuantity(540, 'R')
 
-    assert_array_almost_equal(balmy.in_units('F'), balmy_F)
-    assert_array_almost_equal(balmy.in_units('C'), balmy_C)
+    assert_array_almost_equal(balmy.in_units('degF'), balmy_F)
+    assert_array_almost_equal(balmy.in_units('degC'), balmy_C)
     assert_array_almost_equal(balmy.in_units('R'), balmy_R)
 
     balmy_view = balmy.ndarray_view()
 
-    balmy.convert_to_units('F')
+    balmy.convert_to_units('degF')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
 
-    balmy.convert_to_units('C')
+    balmy.convert_to_units('degC')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)
 
@@ -493,13 +494,13 @@
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)
 
-    balmy.convert_to_units('F')
+    balmy.convert_to_units('degF')
     yield assert_true, balmy_view.base is balmy.base
     yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)
 
     yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km
 
-    # Does CGS convergion from F to K work?
+    # Does CGS conversion from F to K work?
     yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')
 
 
@@ -859,6 +860,114 @@
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
+def test_cgs_conversions():
+    from yt.utilities.physical_constants import qp, eps_0, clight
+    from yt.units.dimensions import current_mks, time
+
+    qp_mks = qp.in_units("C")
+    yield assert_equal, qp_mks.units.dimensions, current_mks*time
+    yield assert_array_almost_equal, qp_mks.in_cgs(), qp
+    qp_mks_k = qp_mks.in_units("kC")
+    yield assert_equal, qp_mks.units.dimensions, current_mks*time
+    yield assert_array_almost_equal, qp_mks_k.in_cgs(), qp
+    yield assert_array_almost_equal, qp_mks_k.in_units("kesu"), qp.in_units("kesu")
+
+    K = 1.0/(4*np.pi*eps_0)
+    yield assert_array_almost_equal, K.in_cgs(), 1.0
+
+    B = YTQuantity(1.0, "T")
+    yield assert_array_almost_equal, B.in_units("gauss"), YTQuantity(1.0e4, "gauss")
+
+    I = YTQuantity(1.0, "A")
+    yield assert_array_almost_equal, I.in_units("statA"), YTQuantity(0.1*clight, "statA")
+
+def test_equivalencies():
+    from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh, me, \
+        mass_sun_cgs, G, stefan_boltzmann_constant_cgs
+    import yt.units as u
+
+    # Mass-energy
+
+    E = mp.to_equivalent("keV","mass_energy")
+    yield assert_equal, E, mp*clight*clight
+    yield assert_allclose, mp, E.to_equivalent("g", "mass_energy")
+
+    # Thermal
+
+    T = YTQuantity(1.0e8,"K")
+    E = T.to_equivalent("W*hr","thermal")
+    yield assert_equal, E, (kboltz*T).in_units("W*hr")
+    yield assert_allclose, T, E.to_equivalent("K", "thermal")
+
+    # Spectral
+
+    l = YTQuantity(4000.,"angstrom")
+    nu = l.to_equivalent("Hz","spectral")
+    yield assert_equal, nu, clight/l
+    E = hcgs*nu
+    l2 = E.to_equivalent("angstrom", "spectral")
+    yield assert_allclose, l, l2
+    nu2 = clight/l2.in_units("cm")
+    yield assert_allclose, nu, nu2
+    E2 = nu2.to_equivalent("keV", "spectral")
+    yield assert_allclose, E2, E.in_units("keV")
+
+    # Sound-speed
+
+    mu = 0.6
+    gg = 5./3.
+    c_s = T.to_equivalent("km/s","sound_speed")
+    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
+    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed")
+
+    mu = 0.5
+    gg = 4./3.
+    c_s = T.to_equivalent("km/s","sound_speed", mu=mu, gamma=gg)
+    yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
+    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed",
+                                                    mu=mu, gamma=gg)
+
+    # Lorentz
+
+    v = 0.8*clight
+    g = v.to_equivalent("dimensionless","lorentz")
+    g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
+    yield assert_allclose, g, g2
+    v2 = g2.to_equivalent("mile/hr", "lorentz")
+    yield assert_allclose, v2, v.in_units("mile/hr")
+
+    # Schwarzschild
+
+    R = mass_sun_cgs.to_equivalent("kpc","schwarzschild")
+    yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
+    yield assert_allclose, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
+
+    # Compton
+
+    l = me.to_equivalent("angstrom","compton")
+    yield assert_equal, l, hcgs/(me*clight)
+    yield assert_allclose, me, l.to_equivalent("g", "compton")
+
+    # Number density
+
+    rho = mp/u.cm**3
+
+    n = rho.to_equivalent("cm**-3","number_density")
+    yield assert_equal, n, rho/(mh*0.6)
+    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density")
+
+    n = rho.to_equivalent("cm**-3","number_density", mu=0.75)
+    yield assert_equal, n, rho/(mh*0.75)
+    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density", mu=0.75)
+
+    # Effective temperature
+
+    T = YTQuantity(1.0e4, "K")
+    F = T.to_equivalent("erg/s/cm**2","effective_temperature")
+    yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
+    yield assert_allclose, T, F.to_equivalent("K", "effective_temperature")
+
+
 def test_numpy_wrappers():
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([2, 3, 4, 5, 6], 'cm')

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -20,7 +20,8 @@
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
     cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
     boltzmann_constant_erg_per_K, kelvin_per_rankine, \
-    speed_of_light_cm_per_s
+    speed_of_light_cm_per_s, planck_length_cm, planck_charge_esu, \
+    planck_energy_erg, planck_mass_grams, planck_temperature_K, planck_time_s
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -37,20 +38,25 @@
     # other cgs
     "dyne": (1.0, dimensions.force),
     "erg":  (1.0, dimensions.energy),
-    "esu":  (1.0, dimensions.charge),
-    "gauss": (1.0, dimensions.magnetic_field),
-    "C" : (1.0, dimensions.temperature, -273.15),
+    "esu":  (1.0, dimensions.charge_cgs),
+    "gauss": (1.0, dimensions.magnetic_field_cgs),
+    "degC": (1.0, dimensions.temperature, -273.15),
+    "statA": (1.0, dimensions.current_cgs),
 
     # some SI
     "m": (1.0e2, dimensions.length),
     "J": (1.0e7, dimensions.energy),
     "W": (1.0e7, dimensions.power),
     "Hz": (1.0, dimensions.rate),
+    "N": (1.0e5, dimensions.force),
+    "C": (0.1*speed_of_light_cm_per_s, dimensions.charge_mks),
+    "A": (0.1*speed_of_light_cm_per_s, dimensions.current_mks),
+    "T": (1.0e4, dimensions.magnetic_field_mks),
 
     # Imperial units
     "ft": (30.48, dimensions.length),
     "mile": (160934, dimensions.length),
-    "F": (kelvin_per_rankine, dimensions.temperature, -459.67),
+    "degF": (kelvin_per_rankine, dimensions.temperature, -459.67),
     "R": (kelvin_per_rankine, dimensions.temperature),
 
     # dimensionless stuff
@@ -93,13 +99,11 @@
     # misc
     "eV": (erg_per_eV, dimensions.energy),
     "amu": (amu_grams, dimensions.mass),
-    "me": (mass_electron_grams, dimensions.mass),
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
-    "kB": (boltzmann_constant_erg_per_K,
-           dimensions.energy/dimensions.temperature),
     "photons": (1.0, dimensions.dimensionless),
+    "me": (mass_electron_grams, dimensions.mass),
 
     # for AstroPy compatibility
     "solMass": (mass_sun_grams, dimensions.mass),
@@ -109,11 +113,19 @@
     "sr": (1.0, dimensions.solid_angle),
     "rad": (1.0, dimensions.solid_angle),
     "deg": (np.pi/180., dimensions.angle),
-    "Fr":  (1.0, dimensions.charge),
-    "G": (1.0, dimensions.magnetic_field),
+    "Fr":  (1.0, dimensions.charge_cgs),
+    "G": (1.0, dimensions.magnetic_field_cgs),
     "d": (1.0, dimensions.time),
     "Angstrom": (cm_per_ang, dimensions.length),
 
+    # Planck units
+    "m_pl": (planck_mass_grams, dimensions.mass),
+    "l_pl": (planck_length_cm, dimensions.length),
+    "t_pl": (planck_time_s, dimensions.time),
+    "T_pl": (planck_temperature_K, dimensions.temperature),
+    "q_pl": (planck_charge_esu, dimensions.charge_cgs),
+    "E_pl": (planck_energy_erg, dimensions.energy),
+
 }
 
 # Add LaTeX representations for units with trivial representations.
@@ -177,7 +189,12 @@
     "Hz",
     "W",
     "gauss",
+    "G",
     "Jy",
+    "N",
+    "T",
+    "A",
+    "C",
 )
 
 cgs_base_units = {
@@ -194,4 +211,16 @@
     dimensions.time:'s',
     dimensions.temperature:'K',
     dimensions.angle:'radian',
+    dimensions.current_mks:'A',
 }
+
+cgs_conversions = {
+    "C":"esu",
+    "T":"gauss",
+    "A":"statA",
+}
+
+for conv in cgs_conversions.keys():
+    if conv in prefixable_units:
+        for p in unit_prefixes:
+            cgs_conversions[p+conv] = p+cgs_conversions[conv]
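
The loop above expands cgs_conversions over the SI prefixes so that prefixed
units convert as well (e.g. "kC" maps to "kesu"). The same idea, sketched
standalone with an illustrative subset of prefixes:

cgs_conversions = {"C": "esu", "T": "gauss", "A": "statA"}
unit_prefixes = ["k", "m", "M"]   # illustrative subset

for conv in list(cgs_conversions.keys()):
    for p in unit_prefixes:
        cgs_conversions[p + conv] = p + cgs_conversions[conv]

print cgs_conversions["kC"]   # kesu
print cgs_conversions["mT"]   # mgauss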

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -17,7 +17,7 @@
     Pow, Symbol, Integer, \
     Float, Basic, Rational, sqrt
 from sympy.core.numbers import One
-from sympy import sympify, latex
+from sympy import sympify, latex, symbols
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
@@ -26,7 +26,7 @@
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
-    mks_base_units
+    mks_base_units, cgs_conversions
 from yt.units.unit_registry import UnitRegistry
 
 import copy
@@ -116,7 +116,7 @@
 
     # Extra attributes
     __slots__ = ["expr", "is_atomic", "cgs_value", "cgs_offset", "dimensions",
-                 "registry"]
+                 "registry", "cgs_conversion", "is_mks"]
 
     def __new__(cls, unit_expr=sympy_one, cgs_value=None, cgs_offset=0.0,
                 dimensions=None, registry=None, **assumptions):
@@ -191,12 +191,11 @@
             validate_dimensions(dimensions)
         else:
             # lookup the unit symbols
-            try:
-                cgs_value, dimensions = \
-                    _get_unit_data_from_expr(unit_expr, registry.lut)
-            except ValueError:
-                cgs_value, dimensions, cgs_offset = \
-                    _get_unit_data_from_expr(unit_expr, registry.lut)
+            unit_data = _get_unit_data_from_expr(unit_expr, registry.lut)
+            cgs_value = unit_data[0]
+            dimensions = unit_data[1]
+            if len(unit_data) == 3:
+                cgs_offset = unit_data[2]
 
         # Create obj with superclass construct.
         obj = Expr.__new__(cls, **assumptions)
@@ -209,6 +208,22 @@
         obj.dimensions = dimensions
         obj.registry = registry
 
+        check_atoms = [atom for atom in unit_expr.free_symbols
+                       if str(atom) in cgs_conversions]
+        if len(check_atoms) > 0:
+            conversions = []
+            for atom in check_atoms:
+                conversions.append((atom,symbols(cgs_conversions[str(atom)])))
+            conversion = unit_expr.subs(conversions)
+            conversion = Unit(unit_expr=conversion, cgs_value=1.0,
+                               dimensions=None, registry=registry)
+            is_mks = True
+        else:
+            conversion = None
+            is_mks = False
+        obj.cgs_conversion = conversion
+        obj.is_mks = is_mks
+
         if unit_key:
             registry.unit_objs[unit_key] = obj
 
@@ -264,7 +279,7 @@
                 cgs_offset = self.cgs_offset
             else:
                 raise InvalidUnitOperation("Quantities with units of Fahrenheit "
-                                           "and Celcius cannot be multiplied.")
+                                           "and Celsius cannot be multiplied.")
 
         return Unit(self.expr * u.expr,
                     cgs_value=(self.cgs_value * u.cgs_value),
@@ -287,7 +302,7 @@
                 cgs_offset = self.cgs_offset
             else:
                 raise InvalidUnitOperation("Quantities with units of Farhenheit "
-                                           "and Celcius cannot be multiplied.")
+                                           "and Celsius cannot be multiplied.")
 
         return Unit(self.expr / u.expr,
                     cgs_value=(self.cgs_value / u.cgs_value),
@@ -307,7 +322,8 @@
                                        "it to a float." % (p, type(p)) )
 
         return Unit(self.expr**p, cgs_value=(self.cgs_value**p),
-                    dimensions=(self.dimensions**p), registry=self.registry)
+                    dimensions=(self.dimensions**p),
+                    registry=self.registry)
 
     def __eq__(self, u):
         """ Test unit equality. """
@@ -340,6 +356,14 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
+        first_check = False
+        second_check = False
+        if self.cgs_conversion:
+            first_check = self.cgs_conversion.dimensions / other_unit.dimensions == sympy_one
+        if other_unit.cgs_conversion:
+            second_check = other_unit.cgs_conversion.dimensions / self.dimensions == sympy_one
+        if first_check or second_check:
+            return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 
     @property
@@ -372,20 +396,28 @@
         Create and return dimensionally-equivalent cgs units.
 
         """
-        units_string = self._get_system_unit_string(cgs_base_units)
+        if self.cgs_conversion:
+            units = self.cgs_conversion
+        else:
+            units = self
+        units_string = units._get_system_unit_string(cgs_base_units)
         return Unit(units_string, cgs_value=1.0,
-                    dimensions=self.dimensions, registry=self.registry)
+                    dimensions=units.dimensions, registry=self.registry)
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
 
         """
-        units_string = self._get_system_unit_string(mks_base_units)
-        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent())[0] /
-                     get_conversion_factor(self, Unit(units_string))[0])
+        if self.cgs_conversion and not self.is_mks:
+            units = self.cgs_conversion
+        else:
+            units = self
+        units_string = units._get_system_unit_string(mks_base_units)
+        cgs_value = (get_conversion_factor(units, units.get_cgs_equivalent())[0] /
+                     get_conversion_factor(units, Unit(units_string))[0])
         return Unit(units_string, cgs_value=cgs_value,
-                    dimensions=self.dimensions, registry=self.registry)
+                    dimensions=units.dimensions, registry=self.registry)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)
@@ -430,7 +462,7 @@
             return ratio, ratio*old_units.cgs_offset - new_units.cgs_offset
         else:
             raise InvalidUnitOperation(
-                "Fahrenheit and Celsius are not absoulte temperature scales "
+                "Fahrenheit and Celsius are not absolute temperature scales "
                 "and cannot be used in compound unit symbols.")
 
 #
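
The cgs_conversion attribute introduced above is what lets an SI
electromagnetic unit be treated as compatible with its Gaussian counterpart,
even though their formal dimensions differ. A small sketch against this
branch:

from yt.units.unit_object import Unit

tesla = Unit("T")
gauss = Unit("gauss")

print tesla.same_dimensions_as(gauss)   # True, via the stored cgs_conversion
print tesla.get_cgs_equivalent()        # the Gaussian equivalent in cgs base units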

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/unit_symbols.py
--- a/yt/units/unit_symbols.py
+++ b/yt/units/unit_symbols.py
@@ -140,8 +140,8 @@
 MeV = mega_electron_volt = quan(1.0, "MeV")
 GeV = giga_electron_volt = quan(1.0, "GeV")
 amu = atomic_mass_unit = quan(1.0, "amu")
+angstrom = quan(1.0, "angstrom")
 me  = electron_mass = quan(1.0, "me")
-angstrom = quan(1.0, "angstrom")
 
 #
 # Angle units

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -33,10 +33,12 @@
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
-    YTUfuncUnitError, YTIterableUnitCoercionError
+    YTUfuncUnitError, YTIterableUnitCoercionError, \
+    YTInvalidUnitEquivalence
 from numbers import Number as numeric_type
 from yt.utilities.on_demand_imports import _astropy
 from sympy import Rational
+from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
 
 NULL_UNIT = Unit()
 
@@ -458,6 +460,44 @@
         """
         return self.in_units(self.units.get_mks_equivalent())
 
+    def to_equivalent(self, unit, equiv, **kwargs):
+        """
+        Convert a YTArray or YTQuantity to an equivalent, i.e., something that is
+        related by only a constant factor but not in the same units.
+
+        Parameters
+        ----------
+        unit : string
+            The unit that you wish to convert to.
+        equiv : string
+            The equivalence you wish to use. To see which equivalencies are
+            supported for this unitful quantity, try the :meth:`list_equivalencies`
+            method.
+
+        Examples
+        --------
+        >>> a = yt.YTArray(1.0e7,"K")
+        >>> a.to_equivalent("keV", "thermal")
+        """
+        from equivalencies import equivalence_registry
+        this_equiv = equivalence_registry[equiv]()
+        old_dims = self.units.dimensions
+        new_dims = YTQuantity(1.0, unit, registry=self.units.registry).units.dimensions
+        if old_dims in this_equiv.dims and new_dims in this_equiv.dims:
+            return this_equiv.convert(self, new_dims, **kwargs).in_units(unit)
+        else:
+            raise YTInvalidUnitEquivalence(equiv, self.units, unit)
+
+    def list_equivalencies(self):
+        """
+        Lists the possible equivalencies associated with this YTArray or
+        YTQuantity.
+        """
+        from equivalencies import equivalence_registry
+        for k,v in equivalence_registry.items():
+            if self.units.dimensions in v.dims:
+                print v()
+
     def ndarray_view(self):
         """
         Returns a view into the array, but as an ndarray rather than ytarray.

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -426,7 +426,6 @@
     def __str__(self):
         return "A file already exists at %s and clobber=False." % self.filename
 
-
 class YTGDFUnknownGeometry(Exception):
     def __init__(self, geometry):
         self.geometry = geometry
@@ -435,4 +434,14 @@
         return '''Unknown geometry %i. Please refer to GDF standard
                   for more information''' % self.geometry
 
+class YTInvalidUnitEquivalence(Exception):
+    def __init__(self, equiv, unit1, unit2):
+        self.equiv = equiv
+        self.unit1 = unit1
+        self.unit2 = unit2
 
+    def __str__(self):
+        return "The unit equivalence '%s' does not exist for the units '%s' and '%s'." % (self.equiv,
+                                                                                          self.unit1,
+                                                                                          self.unit2)
+

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,5 +1,6 @@
 from yt.utilities.physical_ratios import *
 from yt.units.yt_array import YTQuantity
+from math import pi
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
 amu_cgs           = YTQuantity(amu_grams, 'g')
@@ -14,6 +15,7 @@
 
 # Charge
 charge_proton_cgs = YTQuantity(4.8032056e-10, 'esu')
+elementary_charge = charge_proton_cgs
 
 # Physical Constants
 boltzmann_constant_cgs = YTQuantity(boltzmann_constant_erg_per_K, 'erg/K')
@@ -35,7 +37,7 @@
 mass_mars_cgs = YTQuantity(mass_mars_grams, 'g')
 mass_saturn_cgs = YTQuantity(mass_saturn_grams, 'g')
 mass_uranus_cgs = YTQuantity(mass_uranus_grams, 'g')
-mass_neptun_cgs = YTQuantity(mass_neptun_grams, 'g')
+mass_neptune_cgs = YTQuantity(mass_neptune_grams, 'g')
 
 #Short cuts
 G = gravitational_constant_cgs
@@ -48,5 +50,17 @@
 kboltz = boltzmann_constant_cgs
 kb = kboltz
 hcgs = planck_constant_cgs
+hbar = 0.5*hcgs/pi
 sigma_thompson = cross_section_thompson_cgs
 Na = 1 / amu_cgs
+
+#Planck units
+m_pl = planck_mass = YTQuantity(planck_mass_grams, "g")
+l_pl = planck_length = YTQuantity(planck_length_cm, "cm")
+t_pl = planck_time = YTQuantity(planck_time_s, "s")
+E_pl = planck_energy = YTQuantity(planck_energy_erg, "erg")
+q_pl = planck_charge = YTQuantity(planck_charge_esu, "esu")
+T_pl = planck_temperature = YTQuantity(planck_temperature_K, "K")
+
+mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
+eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("C**2/N/m**2")
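
A few consistency checks follow directly from the definitions above; sketched
here, assuming this branch:

import numpy as np
from yt.utilities.physical_constants import \
    planck_length, planck_time, planck_mass, clight, eps_0

print (planck_length/planck_time).in_units("cm/s")   # equals c, by construction
print (planck_mass*clight*clight).in_units("erg")    # equals planck_energy
print (1.0/(4*np.pi*eps_0)).in_cgs()                 # Coulomb constant, ~1 in Gaussian units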

diff -r c91388a959906a04a5db1e773537e94bcb636a62 -r 1e0faa5cb0fc8a94a358a7d90fd672176db42eeb yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -92,7 +92,7 @@
 mass_mars_grams = mass_sun_grams / 3098708.0
 mass_saturn_grams = mass_sun_grams / 3497.898
 mass_uranus_grams = mass_sun_grams / 22902.98
-mass_neptun_grams = mass_sun_grams / 19412.24
+mass_neptune_grams = mass_sun_grams / 19412.24
 
 # flux
 jansky_cgs = 1.0e-23
@@ -109,3 +109,11 @@
 # Miscellaneous
 HUGE = 1.0e90
 TINY = 1.0e-40
+
+# Planck units
+planck_mass_grams = 2.17650925245e-05
+planck_length_cm = 1.6161992557e-33
+planck_time_s = planck_length_cm / speed_of_light_cm_per_s
+planck_energy_erg = planck_mass_grams * speed_of_light_cm_per_s * speed_of_light_cm_per_s
+planck_temperature_K = planck_energy_erg / boltzmann_constant_erg_per_K
+planck_charge_esu = 5.62274532302e-09

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


