[yt-svn] commit/yt: 131 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Mar 3 07:02:00 PST 2016


131 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/662e0ee3dbdd/
Changeset:   662e0ee3dbdd
Branch:      yt
User:        jzuhone
Date:        2015-11-30 21:39:14+00:00
Summary:     Convert to a set of base units defined by a dict.
Affected #:  2 files

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 662e0ee3dbddac5e124173e7f8ff7d2181727c4c yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -22,6 +22,7 @@
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
+from yt.units import dimensions
 from yt.units.dimensions import \
     base_dimensions, temperature, \
     dimensionless, current_mks
@@ -405,7 +406,7 @@
                 power_string = "**(%s)" % factor.as_base_exp()[1]
             else:
                 power_string = ""
-            units.append("".join([unit_string, power_string]))
+            units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
     def get_base_equivalent(self):
@@ -415,7 +416,7 @@
         units_string = self._get_system_unit_string(yt_base_units)
         return Unit(units_string, base_value=1.0,
                     dimensions=self.dimensions, registry=self.registry)
-    
+
     def get_cgs_equivalent(self):
         """
         Create and return dimensionally-equivalent cgs units.
@@ -436,6 +437,20 @@
         return Unit(units_string, base_value=base_value,
                     dimensions=self.dimensions, registry=self.registry)
 
+    def get_custom_equivalent(self, base_units):
+        """
+        Create and return dimensionally-equivalent units in a specified base.
+        """
+        bu = yt_base_units.copy() # This ensures we have a full set regardless
+        for key, value in base_units.items():
+            dim = getattr(dimensions, key)
+            bu[dim] = value
+        units_string = self._get_system_unit_string(bu)
+        base_value = get_conversion_factor(self, self.get_base_equivalent())[0]
+        base_value /= get_conversion_factor(self, Unit(units_string))[0]
+        return Unit(units_string, base_value=base_value,
+                    dimensions=self.dimensions, registry=self.registry)
+
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)
 

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 662e0ee3dbddac5e124173e7f8ff7d2181727c4c yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -472,6 +472,26 @@
         """
         return self.convert_to_units(self.units.get_mks_equivalent())
 
+    def convert_to_custom(self, base_units):
+        """
+        Convert the array and units to the equivalent custom units
+        base.
+
+        Parameters
+        ----------
+        base_units : dict
+            A dictionary which maps the name of the dimension to the
+            base unit requested. Any dimensions omitted from this dict
+            will revert to the default base units in the conversion.
+
+        Examples
+        --------
+        >>> E = YTQuantity(2.5, "erg/s")
+        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
+        >>> E.convert_to_custom(base_units)
+        """
+        return self.convert_to_units(self.units.get_custom_equivalent(base_units))
+
     def in_units(self, units):
         """
         Creates a copy of this array with the data in the supplied units, and
@@ -542,6 +562,29 @@
         """
         return self.in_units(self.units.get_mks_equivalent())
 
+    def in_custom(self, base_units):
+        """
+        Creates a copy of this array with the data in a set of custom base units.
+
+        Parameters
+        ----------
+        base_units : dict
+            A dictionary which maps the name of the dimension to the
+            base unit requested. Any dimensions omitted from this dict
+            will revert to the default base units in the conversion.
+
+        Returns
+        -------
+        Quantity object with data converted to the specified units base.
+
+        Examples
+        --------
+        >>> from yt.units import G
+        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
+        >>> my_G = G.in_custom(base_units)
+        """
+        return self.in_units(self.units.get_custom_equivalent(base_units))
+
     def to_equivalent(self, unit, equiv, **kwargs):
         """
         Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
@@ -695,7 +738,7 @@
             The Pint UnitRegistry to use in the conversion. If one is not
             supplied, the default one will be used. NOTE: This is not
             the same as a yt UnitRegistry object.
-            
+
         Examples
         --------
         >>> a = YTQuantity(4.0, "cm**2/s")
@@ -708,7 +751,7 @@
         units = []
         for unit, pow in powers_dict.items():
             # we have to do this because Pint doesn't recognize
-            # "yr" as "year" 
+            # "yr" as "year"
             if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
                 unit = str(unit).replace("yr","year")
             units.append("%s**(%s)" % (unit, Rational(pow)))


https://bitbucket.org/yt_analysis/yt/commits/82f20ac463a6/
Changeset:   82f20ac463a6
Branch:      yt
User:        jzuhone
Date:        2015-11-30 21:55:36+00:00
Summary:     Added a test
Affected #:  1 file

diff -r 662e0ee3dbddac5e124173e7f8ff7d2181727c4c -r 82f20ac463a692a81863304d6aea17197475ff0a yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -467,6 +467,12 @@
     yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
     yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
 
+    base_units = {'length':'kpc','time':'Myr','mass':'1.0e14*Msun'}
+    em3_converted = YTQuantity(1.545436840386756e-05, '100000000000000.0*Msun/(Myr**2*kpc)')
+    yield assert_equal, em3.in_custom(base_units), em3
+    yield assert_array_almost_equal, em3.in_custom(base_units), em3_converted
+    yield assert_equal, str(em3.in_custom(base_units).units), '100000000000000.0*Msun/(Myr**2*kpc)'
+
     dimless = YTQuantity(1.0, "")
     yield assert_equal, dimless.in_cgs(), dimless
     yield assert_equal, dimless.in_cgs(), 1.0


https://bitbucket.org/yt_analysis/yt/commits/55db2b7e0d3f/
Changeset:   55db2b7e0d3f
Branch:      yt
User:        jzuhone
Date:        2015-11-30 22:47:59+00:00
Summary:     Working on custom dataset units now
Affected #:  4 files

diff -r 82f20ac463a692a81863304d6aea17197475ff0a -r 55db2b7e0d3fa129d9c55ada4701df6b2e66aaa7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,8 +40,10 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
+from yt.units import dimensions
 from yt.units.unit_object import Unit
 from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_lookup_table import yt_base_units
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.fields.fluid_fields import \
@@ -234,6 +236,7 @@
         self._parse_parameter_file()
         self.set_units()
         self._setup_coordinate_handler()
+        self.custom_base_units = yt_base_units.copy()
 
         # Because we need an instantiated class to check the ds's existence in
         # the cache, we move that check to here from __new__.  This avoids
@@ -804,6 +807,14 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
                                    DW.units.dimensions)
 
+    def set_custom_base_units(self, base_units):
+        for key, value in base_units.items():
+            if isinstance(value, string_types):
+                val = value
+            elif isinstance(value, tuple):
+                val = "*".join(value)
+            self.custom_base_units[key] = self.quan(1.0, val).units
+
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return

diff -r 82f20ac463a692a81863304d6aea17197475ff0a -r 55db2b7e0d3fa129d9c55ada4701df6b2e66aaa7 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -198,22 +198,22 @@
 )
 
 yt_base_units = {
-    dimensions.mass: 'g',
-    dimensions.length: 'cm',
-    dimensions.time: 's',
-    dimensions.temperature: 'K',
-    dimensions.angle: 'radian',
-    dimensions.current_mks: 'A',
+    'mass': 'g',
+    'length': 'cm',
+    'time': 's',
+    'temperature': 'K',
+    'angle': 'radian',
+    'current_mks': 'A',
 }
 
 cgs_base_units = yt_base_units.copy()
-cgs_base_units.pop(dimensions.current_mks)
+cgs_base_units.pop('current_mks')
 
 mks_base_units = {
-    dimensions.mass: 'kg',
-    dimensions.length: 'm',
-    dimensions.time: 's',
-    dimensions.temperature: 'K',
-    dimensions.angle: 'radian',
-    dimensions.current_mks: 'A',
+    'mass': 'kg',
+    'length': 'm',
+    'time': 's',
+    'temperature': 'K',
+    'angle': 'radian',
+    'current_mks': 'A',
 }

diff -r 82f20ac463a692a81863304d6aea17197475ff0a -r 55db2b7e0d3fa129d9c55ada4701df6b2e66aaa7 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -400,8 +400,8 @@
         if my_dims is dimensionless:
             return ""
         for factor in my_dims.as_ordered_factors():
-            dim = list(factor.free_symbols)[0]
-            unit_string = base_units[dim]
+            dim = str(list(factor.free_symbols)[0]).strip("()")
+            unit_string = str(base_units[dim])
             if factor.is_Pow:
                 power_string = "**(%s)" % factor.as_base_exp()[1]
             else:
@@ -441,11 +441,7 @@
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
-        bu = yt_base_units.copy() # This ensures we have a full set regardless
-        for key, value in base_units.items():
-            dim = getattr(dimensions, key)
-            bu[dim] = value
-        units_string = self._get_system_unit_string(bu)
+        units_string = self._get_system_unit_string(base_units)
         base_value = get_conversion_factor(self, self.get_base_equivalent())[0]
         base_value /= get_conversion_factor(self, Unit(units_string))[0]
         return Unit(units_string, base_value=base_value,

diff -r 82f20ac463a692a81863304d6aea17197475ff0a -r 55db2b7e0d3fa129d9c55ada4701df6b2e66aaa7 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -492,6 +492,18 @@
         """
         return self.convert_to_units(self.units.get_custom_equivalent(base_units))
 
+    def convert_to_dataset_units(self, ds):
+        """
+        Convert the array and units to the custom base units of the dataset *ds*.
+
+        Examples
+        --------
+        >>> E = YTQuantity(2.5, "erg/s")
+        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0100")
+        >>> E.convert_to_dataset_units(ds)
+        """
+        return self.convert_to_units(self.units.get_custom_equivalent(ds.custom_base_units))
+
     def in_units(self, units):
         """
         Creates a copy of this array with the data in the supplied units, and
@@ -585,6 +597,25 @@
         """
         return self.in_units(self.units.get_custom_equivalent(base_units))
 
+    def in_dataset_units(self, ds):
+        """
+        Creates a copy of this array with the data in the custom base units
+        of the Dataset *ds*.
+
+        Returns
+        -------
+        Quantity object with data converted to the custom units base
+        of a Dataset *ds*.
+
+        Examples
+        --------
+        >>> from yt.units import G
+        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0100")
+        >>> ds.set_custom_base_units({"length":"km", "time":"yr"})
+        >>> my_G = G.in_dataset_units(ds)
+        """
+        return self.in_units(self.units.get_custom_equivalent(ds.custom_base_units))
+
     def to_equivalent(self, unit, equiv, **kwargs):
         """
         Convert a YTArray or YTQuantity to an equivalent, e.g., something that is


https://bitbucket.org/yt_analysis/yt/commits/a449fd9774cc/
Changeset:   a449fd9774cc
Branch:      yt
User:        jzuhone
Date:        2015-11-30 23:01:59+00:00
Summary:     Fixing bugs here
Affected #:  1 file

diff -r 55db2b7e0d3fa129d9c55ada4701df6b2e66aaa7 -r a449fd9774cc06139ccb7dc70bf3dc9bcf3d0204 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -812,8 +812,8 @@
             if isinstance(value, string_types):
                 val = value
             elif isinstance(value, tuple):
-                val = "*".join(value)
-            self.custom_base_units[key] = self.quan(1.0, val).units
+                val = "*".join([str(value[0]), value[1]])
+            self.custom_base_units[key] = val
 
     def _override_code_units(self):
         if len(self.units_override) == 0:


https://bitbucket.org/yt_analysis/yt/commits/06bd847efc0f/
Changeset:   06bd847efc0f
Branch:      yt
User:        jzuhone
Date:        2015-12-01 04:41:10+00:00
Summary:     This should be type
Affected #:  1 file

diff -r a449fd9774cc06139ccb7dc70bf3dc9bcf3d0204 -r 06bd847efc0f7e0a3fabe0d278c187a23e42478d yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -77,7 +77,7 @@
         return 0.5*data[ftype, "density"] * ( data[ftype, "velocity_x"]**2.0
                                               + data[ftype, "velocity_y"]**2.0
                                               + data[ftype, "velocity_z"]**2.0 )
-    registry.add_field(("gas", "kinetic_energy"),
+    registry.add_field((ftype, "kinetic_energy"),
                        function = _kin_energy,
                        units = "erg / cm**3")
 


https://bitbucket.org/yt_analysis/yt/commits/391341369a6f/
Changeset:   391341369a6f
Branch:      yt
User:        jzuhone
Date:        2015-12-01 16:40:29+00:00
Summary:     Set custom equivalent stuff here
Affected #:  1 file

diff -r 06bd847efc0f7e0a3fabe0d278c187a23e42478d -r 391341369a6f3f8b404320a14fd2ea55976e674a yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_cgs_equivalent())
+                output_units = str(u.get_custom_equivalent(self.ds.custom_base_units))
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_cgs_equivalent())
+            units = str(u.get_custom_equivalent(self.ds.custom_base_units))
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),


https://bitbucket.org/yt_analysis/yt/commits/556784facfeb/
Changeset:   556784facfeb
Branch:      yt
User:        jzuhone
Date:        2015-12-01 17:19:43+00:00
Summary:     Simplifying this code--we don't need this many options.
Affected #:  2 files

diff -r 391341369a6f3f8b404320a14fd2ea55976e674a -r 556784facfeb010a6fb7a221e8152aee74788085 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -409,43 +409,34 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def get_base_equivalent(self):
-        """
-        Create and return dimensionally-equivalent base units.
-        """
+    def _get_yt_base_equivalent(self):
         units_string = self._get_system_unit_string(yt_base_units)
         return Unit(units_string, base_value=1.0,
                     dimensions=self.dimensions, registry=self.registry)
 
+    def get_base_equivalent(self, base_units):
+        """
+        Create and return dimensionally-equivalent units in a specified base.
+        """
+        units_string = self._get_system_unit_string(base_units)
+        base_value = get_conversion_factor(self, self._get_yt_base_equivalent())[0]
+        base_value /= get_conversion_factor(self, Unit(units_string))[0]
+        return Unit(units_string, base_value=base_value,
+                    dimensions=self.dimensions, registry=self.registry)
+
     def get_cgs_equivalent(self):
         """
         Create and return dimensionally-equivalent cgs units.
         """
         if current_mks in self.dimensions.free_symbols:
             raise YTUnitsNotReducible(self, "cgs")
-        units_string = self._get_system_unit_string(cgs_base_units)
-        return Unit(units_string, base_value=1.0,
-                    dimensions=self.dimensions, registry=self.registry)
+        return self.get_base_equivalent(cgs_base_units)
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
         """
-        units_string = self._get_system_unit_string(mks_base_units)
-        base_value = get_conversion_factor(self, self.get_base_equivalent())[0]
-        base_value /= get_conversion_factor(self, Unit(units_string))[0]
-        return Unit(units_string, base_value=base_value,
-                    dimensions=self.dimensions, registry=self.registry)
-
-    def get_custom_equivalent(self, base_units):
-        """
-        Create and return dimensionally-equivalent units in a specified base.
-        """
-        units_string = self._get_system_unit_string(base_units)
-        base_value = get_conversion_factor(self, self.get_base_equivalent())[0]
-        base_value /= get_conversion_factor(self, Unit(units_string))[0]
-        return Unit(units_string, base_value=base_value,
-                    dimensions=self.dimensions, registry=self.registry)
+        return self.get_base_equivalent(mks_base_units)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)

diff -r 391341369a6f3f8b404320a14fd2ea55976e674a -r 556784facfeb010a6fb7a221e8152aee74788085 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -32,6 +32,7 @@
 from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless, current_mks, em_dimensions
+from yt.units.unit_lookup_table import yt_base_units
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
     YTUfuncUnitError, YTIterableUnitCoercionError, \
@@ -451,12 +452,28 @@
 
         return self
 
-    def convert_to_base(self):
+    def convert_to_base(self, base_units=None):
         """
         Convert the array and units to the equivalent base units.
 
+        Parameters
+        ----------
+        base_units : dict, optional
+            A dictionary which maps the name of the dimension to the
+            base unit requested. Any dimensions omitted from this dict
+            will revert to the default base units in the conversion.
+            If not specified, the default base units in yt_base_units
+            are used.
+
+        Examples
+        --------
+        >>> E = YTQuantity(2.5, "erg/s")
+        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
+        >>> E.convert_to_base(base_units)
         """
-        return self.convert_to_units(self.units.get_base_equivalent())
+        if base_units is None:
+            base_units = yt_base_units
+        return self.convert_to_units(self.units.get_base_equivalent(base_units))
 
     def convert_to_cgs(self):
         """
@@ -472,38 +489,6 @@
         """
         return self.convert_to_units(self.units.get_mks_equivalent())
 
-    def convert_to_custom(self, base_units):
-        """
-        Convert the array and units to the equivalent custom units
-        base.
-
-        Parameters
-        ----------
-        base_units : dict
-            A dictionary which maps the name of the dimension to the
-            base unit requested. Any dimensions omitted from this dict
-            will revert to the default base units in the conversion.
-
-        Examples
-        --------
-        >>> E = YTQuantity(2.5, "erg/s")
-        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
-        >>> E.convert_to_custom(base_units)
-        """
-        return self.convert_to_units(self.units.get_custom_equivalent(base_units))
-
-    def convert_to_dataset_units(self, ds):
-        """
-        Convert the array and units to the custom base units of the dataset *ds*.
-
-        Examples
-        --------
-        >>> E = YTQuantity(2.5, "erg/s")
-        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0100")
-        >>> E.convert_to_dataset_units(ds)
-        """
-        return self.convert_to_units(self.units.get_custom_equivalent(ds.custom_base_units))
-
     def in_units(self, units):
         """
         Creates a copy of this array with the data in the supplied units, and
@@ -538,17 +523,29 @@
         """
         return self.in_units(units)
 
-    def in_base(self):
+    def in_base(self, base_units=None):
         """
-        Creates a copy of this array with the data in the equivalent base units,
+        Creates a copy of this array with the data in the specified base units,
         and returns it.
 
-        Returns
-        -------
-        Quantity object with data converted to cgs units.
+        Parameters
+        ----------
+        base_units : dict, optional
+            A dictionary which maps the name of the dimension to the
+            base unit requested. Any dimensions omitted from this dict
+            will revert to the default base units in the conversion.
+            If not specified, the default base units in yt_base_units
+            are used.
 
+        Examples
+        --------
+        >>> E = YTQuantity(2.5, "erg/s")
+        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
+        >>> E_new = E.in_base(base_units)
         """
-        return self.in_units(self.units.get_base_equivalent())
+        if base_units is None:
+            base_units=yt_base_units
+        return self.in_units(self.units.get_base_equivalent(base_units))
 
     def in_cgs(self):
         """
@@ -574,48 +571,6 @@
         """
         return self.in_units(self.units.get_mks_equivalent())
 
-    def in_custom(self, base_units):
-        """
-        Creates a copy of this array with the data in a set of custom base units.
-
-        Parameters
-        ----------
-        base_units : dict
-            A dictionary which maps the name of the dimension to the
-            base unit requested. Any dimensions omitted from this dict
-            will revert to the default base units in the conversion.
-
-        Returns
-        -------
-        Quantity object with data converted to the specified units base.
-
-        Examples
-        --------
-        >>> from yt.units import G
-        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
-        >>> my_G = G.in_custom(base_units)
-        """
-        return self.in_units(self.units.get_custom_equivalent(base_units))
-
-    def in_dataset_units(self, ds):
-        """
-        Creates a copy of this array with the data in the custom base units
-        of the Dataset *ds*.
-
-        Returns
-        -------
-        Quantity object with data converted to the custom units base
-        of a Dataset *ds*.
-
-        Examples
-        --------
-        >>> from yt.units import G
-        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0100")
-        >>> ds.set_custom_base_units({"length":"km", "time":"yr"})
-        >>> my_G = G.in_dataset_units(ds)
-        """
-        return self.in_units(self.units.get_custom_equivalent(ds.custom_base_units))
-
     def to_equivalent(self, unit, equiv, **kwargs):
         """
         Convert a YTArray or YTQuantity to an equivalent, e.g., something that is


https://bitbucket.org/yt_analysis/yt/commits/ffc6f45c68ce/
Changeset:   ffc6f45c68ce
Branch:      yt
User:        jzuhone
Date:        2015-12-01 17:31:49+00:00
Summary:     yet simpler still
Affected #:  2 files

diff -r 556784facfeb010a6fb7a221e8152aee74788085 -r ffc6f45c68ce07ea6240b8f5fefe2a9f2dad6a21 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -409,20 +409,21 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def _get_yt_base_equivalent(self):
-        units_string = self._get_system_unit_string(yt_base_units)
-        return Unit(units_string, base_value=1.0,
-                    dimensions=self.dimensions, registry=self.registry)
-
-    def get_base_equivalent(self, base_units):
+    def get_base_equivalent(self, base_units=None):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
-        units_string = self._get_system_unit_string(base_units)
-        base_value = get_conversion_factor(self, self._get_yt_base_equivalent())[0]
-        base_value /= get_conversion_factor(self, Unit(units_string))[0]
-        return Unit(units_string, base_value=base_value,
-                    dimensions=self.dimensions, registry=self.registry)
+        yt_base_unit_string = self._get_system_unit_string(yt_base_units)
+        yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
+                            dimensions=self.dimensions, registry=self.registry)
+        if base_units is None:
+            return yt_base_unit
+        else:
+            units_string = self._get_system_unit_string(base_units)
+            base_value = get_conversion_factor(self, yt_base_unit)[0]
+            base_value /= get_conversion_factor(self, Unit(units_string))[0]
+            return Unit(units_string, base_value=base_value,
+                        dimensions=self.dimensions, registry=self.registry)
 
     def get_cgs_equivalent(self):
         """
@@ -430,13 +431,16 @@
         """
         if current_mks in self.dimensions.free_symbols:
             raise YTUnitsNotReducible(self, "cgs")
-        return self.get_base_equivalent(cgs_base_units)
+        # Note that the following only works because yt_base_units are simply
+        # cgs_base_units plus Amperes. If that is no longer the case, we will
+        # then need to pass cgs_base_units to the get_base_equivalent call here.
+        return self.get_base_equivalent()
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
         """
-        return self.get_base_equivalent(mks_base_units)
+        return self.get_base_equivalent(base_units=mks_base_units)
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)

diff -r 556784facfeb010a6fb7a221e8152aee74788085 -r ffc6f45c68ce07ea6240b8f5fefe2a9f2dad6a21 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -471,9 +471,7 @@
         >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
         >>> E.convert_to_base(base_units)
         """
-        if base_units is None:
-            base_units = yt_base_units
-        return self.convert_to_units(self.units.get_base_equivalent(base_units))
+        return self.convert_to_units(self.units.get_base_equivalent(base_units=base_units))
 
     def convert_to_cgs(self):
         """
@@ -543,9 +541,7 @@
         >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
         >>> E_new = E.in_base(base_units)
         """
-        if base_units is None:
-            base_units=yt_base_units
-        return self.in_units(self.units.get_base_equivalent(base_units))
+        return self.in_units(self.units.get_base_equivalent(base_units=base_units))
 
     def in_cgs(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/1442226926ed/
Changeset:   1442226926ed
Branch:      yt
User:        jzuhone
Date:        2015-12-01 17:35:19+00:00
Summary:     Taking account of the simplifications that were made
Affected #:  2 files

diff -r ffc6f45c68ce07ea6240b8f5fefe2a9f2dad6a21 -r 1442226926eda742aa0bd2d0d46a8939796c3506 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_custom_equivalent(self.ds.custom_base_units))
+                output_units = str(u.get_base_equivalent(base_units=self.ds.custom_base_units))
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_custom_equivalent(self.ds.custom_base_units))
+            units = str(u.get_base_equivalent(base_units=self.ds.custom_base_units))
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),

diff -r ffc6f45c68ce07ea6240b8f5fefe2a9f2dad6a21 -r 1442226926eda742aa0bd2d0d46a8939796c3506 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -469,9 +469,9 @@
 
     base_units = {'length':'kpc','time':'Myr','mass':'1.0e14*Msun'}
     em3_converted = YTQuantity(1.545436840386756e-05, '100000000000000.0*Msun/(Myr**2*kpc)')
-    yield assert_equal, em3.in_custom(base_units), em3
-    yield assert_array_almost_equal, em3.in_custom(base_units), em3_converted
-    yield assert_equal, str(em3.in_custom(base_units).units), '100000000000000.0*Msun/(Myr**2*kpc)'
+    yield assert_equal, em3.in_base(base_units=base_units), em3
+    yield assert_array_almost_equal, em3.in_base(base_units=base_units), em3_converted
+    yield assert_equal, str(em3.in_base(base_units=base_units).units), '100000000000000.0*Msun/(Myr**2*kpc)'
 
     dimless = YTQuantity(1.0, "")
     yield assert_equal, dimless.in_cgs(), dimless


https://bitbucket.org/yt_analysis/yt/commits/65426de61347/
Changeset:   65426de61347
Branch:      yt
User:        jzuhone
Date:        2015-12-01 17:40:36+00:00
Summary:     Excising convert_to_cgs in favor of convert_to_base
Affected #:  3 files

diff -r 1442226926eda742aa0bd2d0d46a8939796c3506 -r 65426de613475e45441fbcab71efb3fdbc88deec yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -302,7 +302,7 @@
     for field in my_profile:
         # Don't write code units because we might not know those later.
         if isinstance(my_profile[field], YTArray):
-            my_profile[field].convert_to_cgs()
+            my_profile[field].convert_to_base()
         _yt_array_hdf5(profile_group, str(field), my_profile[field])
     variance_storage = "%s_variance" % storage
     if hasattr(halo, variance_storage):
@@ -311,7 +311,7 @@
         for field in my_profile:
             # Don't write code units because we might not know those later.
             if isinstance(my_profile[field], YTArray):
-                my_profile[field].convert_to_cgs()
+                my_profile[field].convert_to_base()
             _yt_array_hdf5(variance_group, str(field), my_profile[field])
     fh.close()
 

diff -r 1442226926eda742aa0bd2d0d46a8939796c3506 -r 65426de613475e45441fbcab71efb3fdbc88deec yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -130,7 +130,7 @@
         if hasattr(data[field], "units"):
             for atom in data[field].units.expr.atoms():
                 if str(atom).startswith("code"):
-                    data[field].convert_to_cgs()
+                    data[field].convert_to_base()
                     break
         if isinstance(field, tuple):
             field_name = field[1]

diff -r 1442226926eda742aa0bd2d0d46a8939796c3506 -r 65426de613475e45441fbcab71efb3fdbc88deec yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -669,8 +669,8 @@
         def _func(*args, **kwargs):
             name = kwargs.pop("result_basename", func.__name__)
             rv = func(*args, **kwargs)
-            if hasattr(rv, "convert_to_cgs"):
-                rv.convert_to_cgs()
+            if hasattr(rv, "convert_to_base"):
+                rv.convert_to_base()
                 _rv = rv.ndarray_view()
             else:
                 _rv = rv
@@ -693,8 +693,8 @@
         def _func(*args, **kwargs):
             name = kwargs.pop("result_basename", func.__name__)
             rv = func(*args, **kwargs)
-            if hasattr(rv, "convert_to_cgs"):
-                rv.convert_to_cgs()
+            if hasattr(rv, "convert_to_base"):
+                rv.convert_to_base()
                 _rv = rv.ndarray_view()
             else:
                 _rv = rv


https://bitbucket.org/yt_analysis/yt/commits/3773528e31ae/
Changeset:   3773528e31ae
Branch:      yt
User:        jzuhone
Date:        2015-12-01 18:14:19+00:00
Summary:     Now excising in_cgs
Affected #:  3 files

diff -r 65426de613475e45441fbcab71efb3fdbc88deec -r 3773528e31aec19b28ffb877bc64373f5d858296 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -186,11 +186,11 @@
             self.center = None
             return
         elif isinstance(center, YTArray):
-            self.center = self.ds.arr(center.in_cgs())
+            self.center = self.ds.arr(center.copy())
             self.center.convert_to_units('code_length')
         elif isinstance(center, (list, tuple, np.ndarray)):
             if isinstance(center[0], YTQuantity):
-                self.center = self.ds.arr([c.in_cgs() for c in center])
+                self.center = self.ds.arr([c.copy() for c in center])
                 self.center.convert_to_units('code_length')
             else:
                 self.center = self.ds.arr(center, 'code_length')
@@ -861,7 +861,7 @@
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
         for i in self._con_args:
             try:
-                s += ", %s=%s" % (i, getattr(self, i).in_cgs())
+                s += ", %s=%s" % (i, getattr(self, i).in_base(self.ds.custom_base_units))
             except AttributeError:
                 s += ", %s=%s" % (i, getattr(self, i))
         return s

diff -r 65426de613475e45441fbcab71efb3fdbc88deec -r 3773528e31aec19b28ffb877bc64373f5d858296 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -23,6 +23,8 @@
 from yt.units.yt_array import \
      YTArray, \
      YTQuantity
+from yt.units.unit_lookup_table import \
+    yt_base_units
 
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs as G, \
@@ -49,6 +51,9 @@
     omega_curvature : the fraction of the energy density of the Universe in 
         curvature.
         Default: 0.0.
+    base_units : dict, optional
+        The units base to use when making calculations. If not specified,
+        the default yt_base_units is assumed.
 
     Examples
     --------
@@ -62,7 +67,8 @@
                  omega_matter = 0.27,
                  omega_lambda = 0.73,
                  omega_curvature = 0.0,
-                 unit_registry = None):
+                 unit_registry = None,
+                 base_units = None):
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
@@ -76,13 +82,16 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
+        self.base_units = yt_base_units.copy()
+        if base_units is not None:
+            self.base_units.update(base_units)
 
     def hubble_distance(self):
         r"""
         The distance corresponding to c / h, where c is the speed of light 
         and h is the Hubble parameter in units of 1 / time.
         """
-        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_cgs()
+        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_base(base_units=self.base_units)
 
     def comoving_radial_distance(self, z_i, z_f):
         r"""
@@ -104,7 +113,7 @@
         
         """
         return (self.hubble_distance() *
-                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_cgs()
+                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_base(base_units=self.base_units)
 
     def comoving_transverse_distance(self, z_i, z_f):
         r"""
@@ -130,13 +139,13 @@
             return (self.hubble_distance() / np.sqrt(self.omega_curvature) * 
                     np.sinh(np.sqrt(self.omega_curvature) * 
                             self.comoving_radial_distance(z_i, z_f) /
-                            self.hubble_distance())).in_cgs()
+                            self.hubble_distance())).in_base(base_units=self.base_units)
         elif (self.omega_curvature < 0):
             return (self.hubble_distance() /
                     np.sqrt(np.fabs(self.omega_curvature)) * 
                     np.sin(np.sqrt(np.fabs(self.omega_curvature)) * 
                            self.comoving_radial_distance(z_i, z_f) /
-                           self.hubble_distance())).in_cgs()
+                           self.hubble_distance())).in_base(base_units=self.base_units)
         else:
             return self.comoving_radial_distance(z_i, z_f)
 
@@ -171,7 +180,7 @@
                       np.sinh(np.fabs(self.omega_curvature) * 
                             self.comoving_transverse_distance(z_i, z_f) /
                             self.hubble_distance()) /
-                            np.sqrt(self.omega_curvature))).in_cgs()
+                            np.sqrt(self.omega_curvature))).in_base(base_units=self.base_units)
         elif (self.omega_curvature < 0):
              return (2 * np.pi * np.power(self.hubble_distance(), 3) /
                      np.fabs(self.omega_curvature) * 
@@ -183,11 +192,11 @@
                       np.arcsin(np.fabs(self.omega_curvature) * 
                            self.comoving_transverse_distance(z_i, z_f) /
                            self.hubble_distance()) /
-                      np.sqrt(np.fabs(self.omega_curvature)))).in_cgs()
+                      np.sqrt(np.fabs(self.omega_curvature)))).in_base(base_units=self.base_units)
         else:
              return (4 * np.pi *
                      np.power(self.comoving_transverse_distance(z_i, z_f), 3) /\
-                     3).in_cgs()
+                     3).in_base(base_units=self.base_units)
 
     def angular_diameter_distance(self, z_i, z_f):
         r"""
@@ -210,7 +219,7 @@
         """
         
         return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs()
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_base(base_units=self.base_units)
 
     def angular_scale(self, z_i, z_f):
         r"""
@@ -232,9 +241,10 @@
         
         """
 
-        return self.angular_diameter_distance(z_i, z_f) / \
+        scale = self.angular_diameter_distance(z_i, z_f) / \
           self.quan(1, "radian")
-
+        return scale.in_base(base_units=self.base_units)
+    
     def luminosity_distance(self, z_i, z_f):
         r"""
         The distance that would be inferred from the inverse-square law of 
@@ -256,7 +266,7 @@
         """
 
         return (self.comoving_transverse_distance(0, z_f) * (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_cgs()
+                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_base(base_units=self.base_units)
 
     def lookback_time(self, z_i, z_f):
         r"""
@@ -278,7 +288,7 @@
 
         """
         return (trapzint(self.age_integrand, z_i, z_f) / \
-                self.hubble_constant).in_cgs()
+                self.hubble_constant).in_base(base_units=self.base_units)
     
     def hubble_time(self, z, z_inf=1e6):
         r"""
@@ -305,7 +315,7 @@
 
         """
         return (trapzint(self.age_integrand, z, z_inf) /
-                self.hubble_constant).in_cgs()
+                self.hubble_constant).in_base(base_units=self.base_units)
 
     def critical_density(self, z):
         r"""
@@ -328,7 +338,7 @@
         return (3.0 / 8.0 / np.pi * 
                 self.hubble_constant**2 / G *
                 ((1 + z)**3.0 * self.omega_matter + 
-                 self.omega_lambda)).in_cgs()
+                 self.omega_lambda)).in_base(base_units=self.base_units)
 
     def hubble_parameter(self, z):
         r"""
@@ -346,7 +356,7 @@
         >>> print co.hubble_parameter(1.0).in_units("km/s/Mpc")
 
         """
-        return self.hubble_constant * self.expansion_factor(z)
+        return self.hubble_constant.in_base(base_units=self.base_units) * self.expansion_factor(z)
 
     def age_integrand(self, z):
         return (1 / (z + 1) / self.expansion_factor(z))
@@ -522,7 +532,7 @@
   
         my_time = t0 / self.hubble_constant
     
-        return my_time.in_cgs()
+        return my_time.in_base(base_units=self.base_units)
 
     _arr = None
     @property

diff -r 65426de613475e45441fbcab71efb3fdbc88deec -r 3773528e31aec19b28ffb877bc64373f5d858296 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -710,7 +710,7 @@
             # setting plot aspect.
             if self.aspect is None:
                 self.aspect = float((self.ds.quan(1.0, unit_y) /
-                                     self.ds.quan(1.0, unit_x)).in_cgs())
+                                     self.ds.quan(1.0, unit_x)).in_base())
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -1672,7 +1672,7 @@
         self._cb_size = 0.0375*fsize
         self._ax_text_size = [1.2*fontscale, 0.9*fontscale]
         self._top_buff_size = 0.30*fontscale
-        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2])).in_cgs()
+        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2])).in_base()
         self._unit_aspect = aspect
 
         size, axrect, caxrect = self._get_best_layout()


https://bitbucket.org/yt_analysis/yt/commits/d539c5cde563/
Changeset:   d539c5cde563
Branch:      yt
User:        jzuhone
Date:        2015-12-01 18:16:04+00:00
Summary:     Just make this "base_units" instead
Affected #:  3 files

diff -r 3773528e31aec19b28ffb877bc64373f5d858296 -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -861,7 +861,7 @@
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
         for i in self._con_args:
             try:
-                s += ", %s=%s" % (i, getattr(self, i).in_base(self.ds.custom_base_units))
+                s += ", %s=%s" % (i, getattr(self, i).in_base(self.ds.base_units))
             except AttributeError:
                 s += ", %s=%s" % (i, getattr(self, i))
         return s

diff -r 3773528e31aec19b28ffb877bc64373f5d858296 -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -236,7 +236,7 @@
         self._parse_parameter_file()
         self.set_units()
         self._setup_coordinate_handler()
-        self.custom_base_units = yt_base_units.copy()
+        self.base_units = yt_base_units.copy()
 
         # Because we need an instantiated class to check the ds's existence in
         # the cache, we move that check to here from __new__.  This avoids
@@ -807,13 +807,13 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
                                    DW.units.dimensions)
 
-    def set_custom_base_units(self, base_units):
+    def set_base_units(self, base_units):
         for key, value in base_units.items():
             if isinstance(value, string_types):
                 val = value
             elif isinstance(value, tuple):
                 val = "*".join([str(value[0]), value[1]])
-            self.custom_base_units[key] = val
+            self.base_units[key] = val
 
     def _override_code_units(self):
         if len(self.units_override) == 0:

diff -r 3773528e31aec19b28ffb877bc64373f5d858296 -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_base_equivalent(base_units=self.ds.custom_base_units))
+                output_units = str(u.get_base_equivalent(base_units=self.ds.base_units))
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_base_equivalent(base_units=self.ds.custom_base_units))
+            units = str(u.get_base_equivalent(base_units=self.ds.base_units))
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),


https://bitbucket.org/yt_analysis/yt/commits/fc81c48f2dd6/
Changeset:   fc81c48f2dd6
Branch:      yt
User:        jzuhone
Date:        2015-12-01 19:08:42+00:00
Summary:     More excising of in_cgs
Affected #:  3 files

diff -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 -r fc81c48f2dd6b2eec129d6fa2b49a13622dc2745 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -553,7 +553,7 @@
                 values = self.arr(*values)
             else:
                 values = self.arr(values)
-        values = values.in_cgs()
+        values = values.in_base()
 
         if outputs is None:
             outputs = self.all_outputs

diff -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 -r fc81c48f2dd6b2eec129d6fa2b49a13622dc2745 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -285,7 +285,7 @@
                     x = _get_position_array(ptype, f, "px")
                     y = _get_position_array(ptype, f, "py")
                     z = np.zeros(x.size, dtype="float64") + \
-                      self.ds.domain_left_edge[2].in_cgs().d
+                      self.ds.domain_left_edge[2].in_base().d
                     yield ptype, (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
@@ -302,7 +302,7 @@
                     x = _get_position_array(ptype, f, "px")
                     y = _get_position_array(ptype, f, "py")
                     z = np.zeros(all_count[ptype], dtype="float64") + \
-                      self.ds.domain_left_edge[2].in_cgs().d
+                      self.ds.domain_left_edge[2].in_base().d
                     mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
@@ -330,7 +330,7 @@
                 pos[:,0] = _get_position_array(ptype, f, "px")
                 pos[:,1] = _get_position_array(ptype, f, "py")
                 pos[:,2] = np.zeros(all_count[ptype], dtype="float64") + \
-                  self.ds.domain_left_edge[2].in_cgs().d
+                  self.ds.domain_left_edge[2].in_base().d
                 # These are 32 bit numbers, so we give a little lee-way.
                 # Otherwise, for big sets of particles, we often will bump into the
                 # domain edges.  This helps alleviate that.

diff -r d539c5cde5633bc7727642ad2ca62bcd3519ca99 -r fc81c48f2dd6b2eec129d6fa2b49a13622dc2745 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -222,7 +222,7 @@
 
     if val is None: val = "None"
     if hasattr(val, "units"):
-        val = val.in_cgs()
+        val = val.in_base()
         fh.attrs["%s_units" % attr] = str(val.units)
     # The following is a crappy workaround for getting
     # Unicode strings into HDF5 attributes in Python 3


https://bitbucket.org/yt_analysis/yt/commits/ee133e3c47b6/
Changeset:   ee133e3c47b6
Branch:      yt
User:        jzuhone
Date:        2015-12-01 19:36:13+00:00
Summary:     Excising in_cgs
Affected #:  1 file

diff -r fc81c48f2dd6b2eec129d6fa2b49a13622dc2745 -r ee133e3c47b663860a9325b6f9041ee5764c9ffd yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -77,10 +77,10 @@
     def _vorticity_x(field, data):
         f  = (data[ftype, "velocity_z"][sl_center,sl_right,sl_center] -
               data[ftype, "velocity_z"][sl_center,sl_left,sl_center]) \
-              / (div_fac*just_one(data["index", "dy"]).in_cgs())
+              / (div_fac*just_one(data["index", "dy"]))
         f -= (data[ftype, "velocity_y"][sl_center,sl_center,sl_right] -
               data[ftype, "velocity_y"][sl_center,sl_center,sl_left]) \
-              / (div_fac*just_one(data["index", "dz"].in_cgs()))
+              / (div_fac*just_one(data["index", "dz"]))
         new_field = data.ds.arr(np.zeros_like(data[ftype, "velocity_z"],
                                               dtype=np.float64),
                                 f.units)


https://bitbucket.org/yt_analysis/yt/commits/d5895503a07e/
Changeset:   d5895503a07e
Branch:      yt
User:        jzuhone
Date:        2015-12-01 20:39:24+00:00
Summary:     Letting derived fields use base units
Affected #:  3 files

diff -r ee133e3c47b663860a9325b6f9041ee5764c9ffd -r d5895503a07e8b0461550b5ee7c0ea0fd8ed9b69 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -32,7 +32,7 @@
     ensure_list, \
     fix_axis, \
     iterable
-from yt.units.unit_object import UnitParseError
+from yt.units.unit_object import UnitParseError, Unit
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
@@ -45,7 +45,8 @@
     YTFieldNotParseable, \
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
-    YTDataSelectorNotImplemented
+    YTDataSelectorNotImplemented, \
+    YTFieldDimensionsError
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -1132,12 +1133,14 @@
                         # infer the units from the units of the data we get back
                         # from the field function and use these units for future
                         # field accesses
-                        units = str(getattr(fd, 'units', ''))
-                        fi.units = units
+                        units = getattr(fd, 'units', '')
+                        if fi.dimensions != Unit(units).dimensions:
+                            raise YTFieldDimensionsError(fi, fd.units)
+                        fi.units = str(units)
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
-                        mylog.warn(msg % (fi.name, units))
+                        mylog.debug(msg % (fi.name, units))
                         continue
                     try:
                         fd.convert_to_units(fi.units)

diff -r ee133e3c47b663860a9325b6f9041ee5764c9ffd -r d5895503a07e8b0461550b5ee7c0ea0fd8ed9b69 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -80,7 +80,8 @@
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None, output_units = None):
+                 not_in_all=False, display_name=None, output_units=None,
+                 dimensions=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -101,7 +102,10 @@
             self.units = ''
         elif isinstance(units, string_types):
             if units.lower() == 'auto':
+                if dimensions is None:
+                    raise RuntimeError("If units='auto', must specify dimensions!")
                 self.units = None
+                self._dimensions = dimensions
             else:
                 self.units = units
         elif isinstance(units, Unit):
@@ -114,6 +118,8 @@
             output_units = self.units
         self.output_units = output_units
 
+        self._dimensions = dimensions
+
     def _copy_def(self):
         dd = {}
         dd['name'] = self.name
@@ -233,6 +239,13 @@
         s += ")"
         return s
 
+    @property
+    def dimensions(self):
+        if self._dimensions is None:
+            return self.get_units().dimensions
+        else:
+            return self._dimensions
+
 class FieldValidator(object):
     pass
 

diff -r ee133e3c47b663860a9325b6f9041ee5764c9ffd -r d5895503a07e8b0461550b5ee7c0ea0fd8ed9b69 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -242,6 +242,15 @@
     def __str__(self):
         return self.msg
 
+class YTFieldDimensionsError(YTException):
+    def __init__(self, field_info, returned_units):
+        self.msg = ("The field function associated with the field '%s' returned "
+                    "data with units '%s' but was defined with dimensions of '%s'.")
+        self.msg = self.msg % (field_info.name, returned_units, field_info.dimensions)
+
+    def __str__(self):
+        return self.msg
+
 class YTFieldUnitParseError(YTException):
     def __init__(self, field_info):
         self.msg = ("The field '%s' has unparseable units '%s'.")


https://bitbucket.org/yt_analysis/yt/commits/70dc1b918baa/
Changeset:   70dc1b918baa
Branch:      yt
User:        jzuhone
Date:        2015-12-01 21:17:21+00:00
Summary:     Adding angular momentum, specific angular momentum dimensions
Affected #:  1 file

diff -r d5895503a07e8b0461550b5ee7c0ea0fd8ed9b69 -r 70dc1b918baa9bec2705da250e2958d5841492ac yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -49,6 +49,8 @@
 specific_flux = flux / rate
 number_density = 1/(length*length*length)
 density = mass * number_density
+angular_momentum = mass*length*velocity
+specific_angular_momentum = angular_momentum / mass
 
 # Gaussian electromagnetic units
 charge_cgs  = (energy * length)**Rational(1, 2)  # proper 1/2 power


https://bitbucket.org/yt_analysis/yt/commits/f3e500aaad07/
Changeset:   f3e500aaad07
Branch:      yt
User:        jzuhone
Date:        2015-12-01 21:18:09+00:00
Summary:     More work on allowing derived fields to work in base units
Affected #:  1 file

diff -r 70dc1b918baa9bec2705da250e2958d5841492ac -r f3e500aaad07430f7e2899a3d5512f660b130406 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1133,11 +1133,13 @@
                         # infer the units from the units of the data we get back
                         # from the field function and use these units for future
                         # field accesses
-                        units = getattr(fd, 'units', '')
-                        if fi.dimensions != Unit(units).dimensions:
+                        units = str(getattr(fd, 'units', ''))
+                        dimensions = Unit(units, registry=self.ds.unit_registry).dimensions
+                        if fi.dimensions != dimensions:
                             raise YTFieldDimensionsError(fi, fd.units)
-                        fi.units = str(units)
+                        fi.units = units
                         self.field_data[field] = self.ds.arr(fd, units)
+                        self.field_data[field].convert_to_base(base_units=self.ds.base_units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
                         mylog.debug(msg % (fi.name, units))


https://bitbucket.org/yt_analysis/yt/commits/e4f3ab1164fb/
Changeset:   e4f3ab1164fb
Branch:      yt
User:        jzuhone
Date:        2015-12-02 07:02:49+00:00
Summary:     Cleaning up my messes
Affected #:  7 files

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -32,7 +32,7 @@
     ensure_list, \
     fix_axis, \
     iterable
-from yt.units.unit_object import UnitParseError, Unit
+from yt.units.unit_object import UnitParseError
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
@@ -45,8 +45,7 @@
     YTFieldNotParseable, \
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
-    YTDataSelectorNotImplemented, \
-    YTFieldDimensionsError
+    YTDataSelectorNotImplemented
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -1134,15 +1133,11 @@
                         # from the field function and use these units for future
                         # field accesses
                         units = str(getattr(fd, 'units', ''))
-                        dimensions = Unit(units, registry=self.ds.unit_registry).dimensions
-                        if fi.dimensions != dimensions:
-                            raise YTFieldDimensionsError(fi, fd.units)
                         fi.units = units
                         self.field_data[field] = self.ds.arr(fd, units)
-                        self.field_data[field].convert_to_base(base_units=self.ds.base_units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
-                        mylog.debug(msg % (fi.name, units))
+                        mylog.warn(msg % (fi.name, units))
                         continue
                     try:
                         fd.convert_to_units(fi.units)

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -80,8 +80,7 @@
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None, output_units=None,
-                 dimensions=None):
+                 not_in_all=False, display_name=None, output_units=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -102,10 +101,7 @@
             self.units = ''
         elif isinstance(units, string_types):
             if units.lower() == 'auto':
-                if dimensions is None:
-                    raise RuntimeError("If units='auto', must specify dimensions!")
                 self.units = None
-                self._dimensions = dimensions
             else:
                 self.units = units
         elif isinstance(units, Unit):
@@ -118,8 +114,6 @@
             output_units = self.units
         self.output_units = output_units
 
-        self._dimensions = dimensions
-
     def _copy_def(self):
         dd = {}
         dd['name'] = self.name
@@ -239,13 +233,6 @@
         s += ")"
         return s
 
-    @property
-    def dimensions(self):
-        if self._dimensions is None:
-            return self.get_units().dimensions
-        else:
-            return self._dimensions
-
 class FieldValidator(object):
     pass
 

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -197,23 +197,19 @@
     "statohm",
 )
 
-yt_base_units = {
-    'mass': 'g',
-    'length': 'cm',
-    'time': 's',
-    'temperature': 'K',
-    'angle': 'radian',
-    'current_mks': 'A',
+default_base_units = {
+    dimensions.mass: 'g',
+    dimensions.length: 'cm',
+    dimensions.time: 's',
+    dimensions.temperature: 'K',
+    dimensions.angle: 'radian',
+    dimensions.current_mks: 'A',
 }
 
-cgs_base_units = yt_base_units.copy()
-cgs_base_units.pop('current_mks')
+cgs_base_units = default_base_units.copy()
+cgs_base_units.pop(dimensions.current_mks)
 
-mks_base_units = {
-    'mass': 'kg',
-    'length': 'm',
-    'time': 's',
-    'temperature': 'K',
-    'angle': 'radian',
-    'current_mks': 'A',
-}
+mks_base_units = default_base_units.copy()
+mks_base_units.update({dimensions.mass: 'kg',
+                       dimensions.length: 'm'})
+

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -27,8 +27,8 @@
     base_dimensions, temperature, \
     dimensionless, current_mks
 from yt.units.unit_lookup_table import \
-    unit_prefixes, prefixable_units, cgs_base_units, \
-    mks_base_units, latex_prefixes, yt_base_units
+    unit_prefixes, prefixable_units, mks_base_units, \
+    latex_prefixes, default_base_units
 from yt.units.unit_registry import \
     UnitRegistry, \
     UnitParseError
@@ -400,8 +400,8 @@
         if my_dims is dimensionless:
             return ""
         for factor in my_dims.as_ordered_factors():
-            dim = str(list(factor.free_symbols)[0]).strip("()")
-            unit_string = str(base_units[dim])
+            dim = list(factor.free_symbols)[0]
+            unit_string = base_units[dim]
             if factor.is_Pow:
                 power_string = "**(%s)" % factor.as_base_exp()[1]
             else:
@@ -413,7 +413,7 @@
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
-        yt_base_unit_string = self._get_system_unit_string(yt_base_units)
+        yt_base_unit_string = self._get_system_unit_string(default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
         if base_units is None:

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/units/unit_systems.py
--- /dev/null
+++ b/yt/units/unit_systems.py
@@ -0,0 +1,55 @@
+"""
+Unit system class.
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.extern.six import string_types
+from yt.units.unit_lookup_table import cgs_base_units, mks_base_units
+from yt.units import dimensions
+from yt.units.unit_object import Unit
+
+class UnitSystem(object):
+    def __init__(self, base_units):
+        self.base_units = {}
+        for key, value in base_units.items():
+            self.base_units[key] = Unit(value)
+
+    def __getitem__(self, key):
+        if isinstance(key, string_types):
+            key = getattr(dimensions, key)
+        if key not in self.base_units:
+            dims = key.expand()
+            units = Unit("")
+            for factor in dims.as_ordered_factors():
+                dim = list(factor.free_symbols)[0]
+                u = self.base_units[dim]
+                if factor.is_Pow:
+                    u = u ** factor.as_base_exp()[1]
+                units *= u
+            self.base_units[key] = units
+        return self.base_units[key]
+
+    def __setitem__(self, key, value):
+        if isinstance(key, string_types):
+            key = getattr(dimensions, key)
+        self.base_units[key] = Unit(value)
+
+cgs_unit_system = UnitSystem(cgs_base_units)
+cgs_unit_system[dimensions.energy] = "erg"
+cgs_unit_system[dimensions.pressure] = "dyne/cm**2"
+cgs_unit_system[dimensions.force] = "dyne"
+cgs_unit_system[dimensions.magnetic_field_cgs] = "gauss"
+
+mks_unit_system = UnitSystem(mks_base_units)
+mks_unit_system[dimensions.energy] = "J"
+mks_unit_system[dimensions.pressure] = "Pa"
+mks_unit_system[dimensions.force] = "N"
+mks_unit_system[dimensions.magnetic_field_mks] = "T"
\ No newline at end of file

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -32,7 +32,6 @@
 from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless, current_mks, em_dimensions
-from yt.units.unit_lookup_table import yt_base_units
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
     YTUfuncUnitError, YTIterableUnitCoercionError, \

diff -r f3e500aaad07430f7e2899a3d5512f660b130406 -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -242,15 +242,6 @@
     def __str__(self):
         return self.msg
 
-class YTFieldDimensionsError(YTException):
-    def __init__(self, field_info, returned_units):
-        self.msg = ("The field function associated with the field '%s' returned "
-                    "data with units '%s' but was defined with dimensions of '%s'.")
-        self.msg = self.msg % (field_info.name, returned_units, field_info.dimensions)
-
-    def __str__(self):
-        return self.msg
-
 class YTFieldUnitParseError(YTException):
     def __init__(self, field_info):
         self.msg = ("The field '%s' has unparseable units '%s'.")


https://bitbucket.org/yt_analysis/yt/commits/b707d5b74489/
Changeset:   b707d5b74489
Branch:      yt
User:        jzuhone
Date:        2015-12-02 17:41:36+00:00
Summary:     More unit systems work--added more units and constants to expand our capabilities.
Affected #:  4 files

diff -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 -r b707d5b744892e7cc50bae520fa713645d679127 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -21,7 +21,8 @@
     cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \
     kelvin_per_rankine, speed_of_light_cm_per_s, planck_length_cm, \
     planck_charge_esu, planck_energy_erg, planck_mass_grams, \
-    planck_temperature_K, planck_time_s, mass_hydrogen_grams
+    planck_temperature_K, planck_time_s, mass_hydrogen_grams, \
+    grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -57,12 +58,15 @@
     "V": (1.0e7, dimensions.electric_potential_mks, 0.0, r"\rm{V}"),
     "ohm": (1.0e7, dimensions.resistance_mks, 0.0, r"\Omega"),
 
-    # Imperial units
+    # Imperial and other non-metric units
     "ft": (30.48, dimensions.length, 0.0, r"\rm{ft}"),
     "mile": (160934, dimensions.length, 0.0, r"\rm{mile}"),
     "degF": (kelvin_per_rankine, dimensions.temperature, -459.67,
              "^\circ\rm{F}"),
     "R": (kelvin_per_rankine, dimensions.temperature, 0.0, r"^\circ\rm{R}"),
+    "lbf": (grams_per_pound*standard_gravity_cm_per_s2, dimensions.force, 0.0, r"\rm{lbf}"),
+    "lbm": (grams_per_pound, dimensions.mass, 0.0, r"\rm{lbm}"),
+    "atm": (pascal_per_atm*10., dimensions.pressure, 0.0, r"\rm{atm}"),
 
     # dimensionless stuff
     "h": (1.0, dimensions.dimensionless, 0.0, r"h"),  # needs to be added for rho_crit_now

diff -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 -r b707d5b744892e7cc50bae520fa713645d679127 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -12,44 +12,63 @@
 #-----------------------------------------------------------------------------
 
 from yt.extern.six import string_types
-from yt.units.unit_lookup_table import cgs_base_units, mks_base_units
 from yt.units import dimensions
 from yt.units.unit_object import Unit
 
 class UnitSystem(object):
-    def __init__(self, base_units):
-        self.base_units = {}
-        for key, value in base_units.items():
-            self.base_units[key] = Unit(value)
+    def __init__(self, length_unit, mass_unit, time_unit,
+                 temperature_unit, angle_unit):
+        self.units_map = {dimensions.length: length_unit,
+                          dimensions.mass: mass_unit,
+                          dimensions.time: time_unit,
+                          dimensions.temperature: temperature_unit,
+                          dimensions.angle: angle_unit}
 
     def __getitem__(self, key):
         if isinstance(key, string_types):
             key = getattr(dimensions, key)
-        if key not in self.base_units:
+        if key not in self.units_map:
             dims = key.expand()
             units = Unit("")
             for factor in dims.as_ordered_factors():
                 dim = list(factor.free_symbols)[0]
-                u = self.base_units[dim]
+                u = self.units_map[dim]
                 if factor.is_Pow:
                     u = u ** factor.as_base_exp()[1]
                 units *= u
-            self.base_units[key] = units
-        return self.base_units[key]
+            self.units_map[key] = units
+        return self.units_map[key]
 
     def __setitem__(self, key, value):
         if isinstance(key, string_types):
             key = getattr(dimensions, key)
-        self.base_units[key] = Unit(value)
+        self.units_map[key] = Unit(value)
 
-cgs_unit_system = UnitSystem(cgs_base_units)
+cgs_unit_system = UnitSystem("cm", "g", "s", "K", "radian")
 cgs_unit_system[dimensions.energy] = "erg"
 cgs_unit_system[dimensions.pressure] = "dyne/cm**2"
 cgs_unit_system[dimensions.force] = "dyne"
 cgs_unit_system[dimensions.magnetic_field_cgs] = "gauss"
 
-mks_unit_system = UnitSystem(mks_base_units)
+mks_unit_system = UnitSystem("m", "kg", "s", "K", "radian")
+mks_unit_system[dimensions.current_mks] = "A"
 mks_unit_system[dimensions.energy] = "J"
 mks_unit_system[dimensions.pressure] = "Pa"
 mks_unit_system[dimensions.force] = "N"
-mks_unit_system[dimensions.magnetic_field_mks] = "T"
\ No newline at end of file
+mks_unit_system[dimensions.magnetic_field_mks] = "T"
+
+imperial_unit_system = UnitSystem("ft", "lbm", "s", "R", "radian")
+imperial_unit_system[dimensions.force] = "lbf"
+
+galactic_unit_system = UnitSystem("kpc", "Msun", "Myr", "K", "radian")
+galactic_unit_system[dimensions.energy] = "keV"
+galactic_unit_system[dimensions.magnetic_field_cgs] = "uG"
+
+geographic_unit_system = UnitSystem("mile", "lbm", "s", "K", "radian")
+geographic_unit_system[dimensions.pressure] = "atm"
+
+unit_system_registry = {"cgs": cgs_unit_system,
+                        "mks": mks_unit_system,
+                        "imperial": imperial_unit_system,
+                        "galactic": galactic_unit_system,
+                        "geographic": geographic_unit_system}

diff -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 -r b707d5b744892e7cc50bae520fa713645d679127 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,7 +20,8 @@
     planck_time_s, \
     planck_energy_erg, \
     planck_charge_esu, \
-    planck_temperature_K
+    planck_temperature_K, \
+    standard_gravity_cm_per_s2
 from yt.units.yt_array import YTQuantity
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
@@ -130,3 +131,8 @@
 # MKS E&M units
 mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
 eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("C**2/N/m**2")
+
+# Misc
+standard_gravity_cgs = YTQuantity(standard_gravity_cm_per_s2, "cm/s**2")
+standard_gravity = standard_gravity_cgs
+

diff -r e4f3ab1164fb8b75bf9e10530664d2d7ecd2d6c2 -r b707d5b744892e7cc50bae520fa713645d679127 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -70,8 +70,9 @@
 sec_per_min  = 60.0
 day_per_year = 365.25
 
-# velocities
+# velocities, accelerations
 speed_of_light_cm_per_s = 2.99792458e10
+standard_gravity_cm_per_s2 = 9.80665e2
 
 # temperature / energy
 boltzmann_constant_erg_per_K = 1.3806488e-16
@@ -120,3 +121,7 @@
 planck_energy_erg = planck_mass_grams * speed_of_light_cm_per_s * speed_of_light_cm_per_s
 planck_temperature_K = planck_energy_erg / boltzmann_constant_erg_per_K
 planck_charge_esu = 5.62274532302e-09
+
+# Imperial and other non-metric units
+grams_per_pound = 453.59237
+pascal_per_atm = 101325.0


https://bitbucket.org/yt_analysis/yt/commits/7277957f436b/
Changeset:   7277957f436b
Branch:      yt
User:        jzuhone
Date:        2015-12-02 17:48:56+00:00
Summary:     Automatically add unit system registries
Affected #:  1 file

diff -r b707d5b744892e7cc50bae520fa713645d679127 -r 7277957f436b3b3e693f38992222ec666938b097 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -15,14 +15,17 @@
 from yt.units import dimensions
 from yt.units.unit_object import Unit
 
+unit_system_registry = {}
+
 class UnitSystem(object):
-    def __init__(self, length_unit, mass_unit, time_unit,
+    def __init__(self, name, length_unit, mass_unit, time_unit,
                  temperature_unit, angle_unit):
         self.units_map = {dimensions.length: length_unit,
                           dimensions.mass: mass_unit,
                           dimensions.time: time_unit,
                           dimensions.temperature: temperature_unit,
                           dimensions.angle: angle_unit}
+        unit_system_registry[name] = self
 
     def __getitem__(self, key):
         if isinstance(key, string_types):
@@ -44,31 +47,25 @@
             key = getattr(dimensions, key)
         self.units_map[key] = Unit(value)
 
-cgs_unit_system = UnitSystem("cm", "g", "s", "K", "radian")
+cgs_unit_system = UnitSystem("cgs", "cm", "g", "s", "K", "radian")
 cgs_unit_system[dimensions.energy] = "erg"
 cgs_unit_system[dimensions.pressure] = "dyne/cm**2"
 cgs_unit_system[dimensions.force] = "dyne"
 cgs_unit_system[dimensions.magnetic_field_cgs] = "gauss"
 
-mks_unit_system = UnitSystem("m", "kg", "s", "K", "radian")
+mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian")
 mks_unit_system[dimensions.current_mks] = "A"
 mks_unit_system[dimensions.energy] = "J"
 mks_unit_system[dimensions.pressure] = "Pa"
 mks_unit_system[dimensions.force] = "N"
 mks_unit_system[dimensions.magnetic_field_mks] = "T"
 
-imperial_unit_system = UnitSystem("ft", "lbm", "s", "R", "radian")
+imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", "R", "radian")
 imperial_unit_system[dimensions.force] = "lbf"
 
-galactic_unit_system = UnitSystem("kpc", "Msun", "Myr", "K", "radian")
+galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
 galactic_unit_system[dimensions.energy] = "keV"
 galactic_unit_system[dimensions.magnetic_field_cgs] = "uG"
 
-geographic_unit_system = UnitSystem("mile", "lbm", "s", "K", "radian")
+geographic_unit_system = UnitSystem("geographic", "mile", "lbm", "s", "K", "radian")
 geographic_unit_system[dimensions.pressure] = "atm"
-
-unit_system_registry = {"cgs": cgs_unit_system,
-                        "mks": mks_unit_system,
-                        "imperial": imperial_unit_system,
-                        "galactic": galactic_unit_system,
-                        "geographic": geographic_unit_system}


https://bitbucket.org/yt_analysis/yt/commits/fcfeaaae3953/
Changeset:   fcfeaaae3953
Branch:      yt
User:        jzuhone
Date:        2015-12-02 19:10:26+00:00
Summary:     More work on integrating unit systems
Affected #:  3 files

diff -r 7277957f436b3b3e693f38992222ec666938b097 -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,10 +40,9 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
-from yt.units import dimensions
 from yt.units.unit_object import Unit
 from yt.units.unit_registry import UnitRegistry
-from yt.units.unit_lookup_table import yt_base_units
+from yt.units.unit_systems import unit_system_registry
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.fields.fluid_fields import \
@@ -194,7 +193,8 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, 
+                 units_override=None, unit_system="cgs"):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -236,7 +236,7 @@
         self._parse_parameter_file()
         self.set_units()
         self._setup_coordinate_handler()
-        self.base_units = yt_base_units.copy()
+        self.unit_system = unit_system_registry[unit_system]
 
         # Because we need an instantiated class to check the ds's existence in
         # the cache, we move that check to here from __new__.  This avoids
@@ -807,14 +807,6 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
                                    DW.units.dimensions)
 
-    def set_base_units(self, base_units):
-        for key, value in base_units.items():
-            if isinstance(value, string_types):
-                val = value
-            elif isinstance(value, tuple):
-                val = "*".join([str(value[0]), value[1]])
-            self.base_units[key] = val
-
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return

diff -r 7277957f436b3b3e693f38992222ec666938b097 -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -51,6 +51,7 @@
 density = mass * number_density
 angular_momentum = mass*length*velocity
 specific_angular_momentum = angular_momentum / mass
+specific_energy = energy / mass
 
 # Gaussian electromagnetic units
 charge_cgs  = (energy * length)**Rational(1, 2)  # proper 1/2 power

diff -r 7277957f436b3b3e693f38992222ec666938b097 -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -19,12 +19,16 @@
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
-                 temperature_unit, angle_unit):
+                 temperature_unit, angle_unit, current_mks_unit=None,
+                 unit_registry=None):
         self.units_map = {dimensions.length: length_unit,
                           dimensions.mass: mass_unit,
                           dimensions.time: time_unit,
                           dimensions.temperature: temperature_unit,
                           dimensions.angle: angle_unit}
+        if current_mks_unit is not None:
+            self.units_map[dimensions.current_mks] = current_mks_unit
+        self.unit_registry = unit_registry
         unit_system_registry[name] = self
 
     def __getitem__(self, key):
@@ -45,27 +49,35 @@
     def __setitem__(self, key, value):
         if isinstance(key, string_types):
             key = getattr(dimensions, key)
-        self.units_map[key] = Unit(value)
+        self.units_map[key] = Unit(value, unit_registry=self.unit_registry)
+
+def create_code_unit_system(ds):
+    code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
+                                  "code_temperature", "radian")
+    code_unit_system[dimensions.velocity] = "code_velocity"
 
 cgs_unit_system = UnitSystem("cgs", "cm", "g", "s", "K", "radian")
 cgs_unit_system[dimensions.energy] = "erg"
+cgs_unit_system[dimensions.specific_energy] = "erg/g"
 cgs_unit_system[dimensions.pressure] = "dyne/cm**2"
 cgs_unit_system[dimensions.force] = "dyne"
 cgs_unit_system[dimensions.magnetic_field_cgs] = "gauss"
 
-mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian")
-mks_unit_system[dimensions.current_mks] = "A"
+mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian",
+                             current_mks_unit="A")
 mks_unit_system[dimensions.energy] = "J"
+mks_unit_system[dimensions.specific_energy] = "J/kg"
 mks_unit_system[dimensions.pressure] = "Pa"
 mks_unit_system[dimensions.force] = "N"
 mks_unit_system[dimensions.magnetic_field_mks] = "T"
 
 imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", "R", "radian")
 imperial_unit_system[dimensions.force] = "lbf"
+imperial_unit_system[dimensions.energy] = "ft*lbf"
 
 galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
 galactic_unit_system[dimensions.energy] = "keV"
 galactic_unit_system[dimensions.magnetic_field_cgs] = "uG"
 
-geographic_unit_system = UnitSystem("geographic", "mile", "lbm", "s", "K", "radian")
+geographic_unit_system = UnitSystem("geographic", "mile", "lbm", "s", "K", "deg")
 geographic_unit_system[dimensions.pressure] = "atm"


https://bitbucket.org/yt_analysis/yt/commits/d164dd5f36f9/
Changeset:   d164dd5f36f9
Branch:      yt
User:        jzuhone
Date:        2015-12-02 19:35:47+00:00
Summary:     Consolidating a lot of code and simplifying things
Affected #:  6 files

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -861,7 +861,7 @@
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
         for i in self._con_args:
             try:
-                s += ", %s=%s" % (i, getattr(self, i).in_base(self.ds.base_units))
+                s += ", %s=%s" % (i, getattr(self, i).in_base(unit_system=self.ds.unit_system))
             except AttributeError:
                 s += ", %s=%s" % (i, getattr(self, i))
         return s

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_base_equivalent(base_units=self.ds.base_units))
+                output_units = str(u.get_base_equivalent(unit_system=self.ds.unit_system))
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_base_equivalent(base_units=self.ds.base_units))
+            units = str(u.get_base_equivalent(unit_system=self.ds.unit_system))
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -209,11 +209,3 @@
     dimensions.angle: 'radian',
     dimensions.current_mks: 'A',
 }
-
-cgs_base_units = default_base_units.copy()
-cgs_base_units.pop(dimensions.current_mks)
-
-mks_base_units = default_base_units.copy()
-mks_base_units.update({dimensions.mass: 'kg',
-                       dimensions.length: 'm'})
-

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -22,16 +22,16 @@
 from sympy.parsing.sympy_parser import \
     parse_expr, auto_number, rationalize
 from keyword import iskeyword
-from yt.units import dimensions
 from yt.units.dimensions import \
     base_dimensions, temperature, \
     dimensionless, current_mks
 from yt.units.unit_lookup_table import \
-    unit_prefixes, prefixable_units, mks_base_units, \
-    latex_prefixes, default_base_units
+    unit_prefixes, prefixable_units, latex_prefixes, \
+    default_base_units
 from yt.units.unit_registry import \
     UnitRegistry, \
     UnitParseError
+from yt.units.unit_systems import unit_system_registry
 from yt.utilities.exceptions import YTUnitsNotReducible
 
 import copy
@@ -409,17 +409,17 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def get_base_equivalent(self, base_units=None):
+    def get_base_equivalent(self, unit_system=None):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
         yt_base_unit_string = self._get_system_unit_string(default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
-        if base_units is None:
+        if unit_system is None:
             return yt_base_unit
         else:
-            units_string = self._get_system_unit_string(base_units)
+            units_string = self._get_system_unit_string(unit_system.base_units)
             base_value = get_conversion_factor(self, yt_base_unit)[0]
             base_value /= get_conversion_factor(self, Unit(units_string))[0]
             return Unit(units_string, base_value=base_value,
@@ -431,16 +431,17 @@
         """
         if current_mks in self.dimensions.free_symbols:
             raise YTUnitsNotReducible(self, "cgs")
-        # Note that the following only works because yt_base_units are simply
-        # cgs_base_units plus Amperes. If that is no longer the case, we will
-        # then need to pass cgs_base_units to the get_base_equivalent call here.
+        # Note that the following call only works because the default base units
+        # are simply cgs units plus Amperes. If that is no longer the case, we will
+        # then need to pass "unit_system=unit_system_registry['cgs']" to the
+        # get_base_equivalent call here.
         return self.get_base_equivalent()
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
         """
-        return self.get_base_equivalent(base_units=mks_base_units)
+        return self.get_base_equivalent(unit_system=unit_system_registry["mks"])
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -29,6 +29,7 @@
         if current_mks_unit is not None:
             self.units_map[dimensions.current_mks] = current_mks_unit
         self.unit_registry = unit_registry
+        self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
 
     def __getitem__(self, key):

diff -r fcfeaaae395322f67ee518c02ee5c7ba253c1950 -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -16,6 +16,7 @@
 import copy
 import numpy as np
 
+from yt.extern.six import string_types
 from functools import wraps
 from numpy import \
     add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
@@ -32,6 +33,7 @@
 from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless, current_mks, em_dimensions
+from yt.units.unit_systems import unit_system_registry
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
     YTUfuncUnitError, YTIterableUnitCoercionError, \
@@ -451,26 +453,25 @@
 
         return self
 
-    def convert_to_base(self, base_units=None):
+    def convert_to_base(self, unit_system=None):
         """
-        Convert the array and units to the equivalent base units.
+        Convert the array and units to the equivalent base units in
+        the specified unit system.
 
         Parameters
         ----------
-        base_units : dict, optional
-            A dictionary which maps the name of the dimension to the
-            base unit requested. Any dimensions omitted from this dict
-            will revert to the default base units in the conversion.
-            If not specified, the default base units in yt_base_units
-            are used.
+        unit_system : UnitRegistry object or string, optional
+            The unit system to be used in the conversion. If not specified,
+            the default base units are used.
 
         Examples
         --------
         >>> E = YTQuantity(2.5, "erg/s")
-        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
-        >>> E.convert_to_base(base_units)
+        >>> E.convert_to_base(unit_system="galactic")
         """
-        return self.convert_to_units(self.units.get_base_equivalent(base_units=base_units))
+        if isinstance(unit_system, string_types):
+            unit_system = unit_system_registry[unit_system]
+        return self.convert_to_units(self.units.get_base_equivalent(unit_system=unit_system))
 
     def convert_to_cgs(self):
         """
@@ -520,27 +521,25 @@
         """
         return self.in_units(units)
 
-    def in_base(self, base_units=None):
+    def in_base(self, unit_system=None):
         """
-        Creates a copy of this array with the data in the specified base units,
-        and returns it.
+        Creates a copy of this array with the data in the specified unit system,
+        and returns it in that system's base units.
 
         Parameters
         ----------
-        base_units : dict, optional
-            A dictionary which maps the name of the dimension to the
-            base unit requested. Any dimensions omitted from this dict
-            will revert to the default base units in the conversion.
-            If not specified, the default base units in yt_base_units
-            are used.
+        unit_system : UnitRegistry object or string, optional
+            The unit system to be used in the conversion. If not specified,
+            the default base units are used.
 
         Examples
         --------
         >>> E = YTQuantity(2.5, "erg/s")
-        >>> base_units = {"length":"kpc","time":"Myr","mass":"Msun"}
-        >>> E_new = E.in_base(base_units)
+        >>> E_new = E.in_base(unit_system="galactic")
         """
-        return self.in_units(self.units.get_base_equivalent(base_units=base_units))
+        if isinstance(unit_system, string_types):
+            unit_system = unit_system_registry[unit_system]
+        return self.in_units(self.units.get_base_equivalent(unit_system=unit_system))
 
     def in_cgs(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/3c4625e2e65f/
Changeset:   3c4625e2e65f
Branch:      yt
User:        jzuhone
Date:        2015-12-02 19:44:48+00:00
Summary:     Bringing cosmology up to speed with unit systems
Affected #:  1 file

diff -r d164dd5f36f9f78402cb8bd9bbd4379be5c45ac1 -r 3c4625e2e65f6b46df103f43314085f678013227 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -1,7 +1,7 @@
 """
 Cosmology calculator.
 Cosmology calculator based originally on http://www.kempner.net/cosmic.php 
-and featuring time and redshift conversion functions from Enzo..
+and featuring time and redshift conversion functions from Enzo.
 
 """
 from __future__ import print_function
@@ -17,14 +17,14 @@
 import functools
 import numpy as np
 
+from yt.extern.six import string_types
 from yt.units import dimensions
 from yt.units.unit_registry import \
      UnitRegistry
 from yt.units.yt_array import \
      YTArray, \
      YTQuantity
-from yt.units.unit_lookup_table import \
-    yt_base_units
+from yt.units.unit_systems import unit_system_registry
 
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs as G, \
@@ -51,9 +51,9 @@
     omega_curvature : the fraction of the energy density of the Universe in 
         curvature.
         Default: 0.0.
-    base_units : dict, optional 
-        The units base to use when making calculations. If not specified,
-        the default yt_units_base is assumed. 
+    unit_system : UnitSystem, optional 
+        The units system to use when making calculations. If not specified,
+        cgs units are assumed.
 
     Examples
     --------
@@ -68,7 +68,7 @@
                  omega_lambda = 0.73,
                  omega_curvature = 0.0,
                  unit_registry = None,
-                 base_units = None):
+                 unit_system = "cgs"):
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
@@ -82,16 +82,16 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
-        self.base_units = yt_base_units.copy()
-        if base_units is not None:
-            self.base_units.update(base_units)
+        if isinstance(unit_system, string_types):
+            unit_system = unit_system_registry[unit_system]
+        self.unit_system = unit_system
 
     def hubble_distance(self):
         r"""
         The distance corresponding to c / h, where c is the speed of light 
         and h is the Hubble parameter in units of 1 / time.
         """
-        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_base(base_units=self.base_units)
+        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_units(self.unit_system["length"])
 
     def comoving_radial_distance(self, z_i, z_f):
         r"""
@@ -113,7 +113,7 @@
         
         """
         return (self.hubble_distance() *
-                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_base(base_units=self.base_units)
+                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_units(self.unit_system["length"])
 
     def comoving_transverse_distance(self, z_i, z_f):
         r"""
@@ -139,13 +139,13 @@
             return (self.hubble_distance() / np.sqrt(self.omega_curvature) * 
                     np.sinh(np.sqrt(self.omega_curvature) * 
                             self.comoving_radial_distance(z_i, z_f) /
-                            self.hubble_distance())).in_base(base_units=self.base_units)
+                            self.hubble_distance())).in_units(self.unit_system["length"])
         elif (self.omega_curvature < 0):
             return (self.hubble_distance() /
                     np.sqrt(np.fabs(self.omega_curvature)) * 
                     np.sin(np.sqrt(np.fabs(self.omega_curvature)) * 
                            self.comoving_radial_distance(z_i, z_f) /
-                           self.hubble_distance())).in_base(base_units=self.base_units)
+                           self.hubble_distance())).in_units(self.unit_system["length"])
         else:
             return self.comoving_radial_distance(z_i, z_f)
 
@@ -180,7 +180,7 @@
                       np.sinh(np.fabs(self.omega_curvature) * 
                             self.comoving_transverse_distance(z_i, z_f) /
                             self.hubble_distance()) /
-                            np.sqrt(self.omega_curvature))).in_base(base_units=self.base_units)
+                            np.sqrt(self.omega_curvature))).in_units(self.unit_system["volume"])
         elif (self.omega_curvature < 0):
              return (2 * np.pi * np.power(self.hubble_distance(), 3) /
                      np.fabs(self.omega_curvature) * 
@@ -192,11 +192,11 @@
                       np.arcsin(np.fabs(self.omega_curvature) * 
                            self.comoving_transverse_distance(z_i, z_f) /
                            self.hubble_distance()) /
-                      np.sqrt(np.fabs(self.omega_curvature)))).in_base(base_units=self.base_units)
+                      np.sqrt(np.fabs(self.omega_curvature)))).in_units(self.unit_system["volume"])
         else:
              return (4 * np.pi *
                      np.power(self.comoving_transverse_distance(z_i, z_f), 3) /\
-                     3).in_base(base_units=self.base_units)
+                     3).in_units(self.unit_system["volume"])
 
     def angular_diameter_distance(self, z_i, z_f):
         r"""
@@ -219,7 +219,7 @@
         """
         
         return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_base(base_units=self.base_units)
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_units(self.unit_system["volume"])
 
     def angular_scale(self, z_i, z_f):
         r"""
@@ -243,7 +243,7 @@
 
         scale = self.angular_diameter_distance(z_i, z_f) / \
           self.quan(1, "radian")
-        return scale.in_base(base_units=self.base_units)
+        return scale.in_units(self.unit_system["length"]/self.unit_system["angle"])
     
     def luminosity_distance(self, z_i, z_f):
         r"""
@@ -266,7 +266,7 @@
         """
 
         return (self.comoving_transverse_distance(0, z_f) * (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_base(base_units=self.base_units)
+                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_units(self.unit_system["length"])
 
     def lookback_time(self, z_i, z_f):
         r"""
@@ -288,7 +288,7 @@
 
         """
         return (trapzint(self.age_integrand, z_i, z_f) / \
-                self.hubble_constant).in_base(base_units=self.base_units)
+                self.hubble_constant).in_units(self.unit_system["time"])
     
     def hubble_time(self, z, z_inf=1e6):
         r"""
@@ -315,7 +315,7 @@
 
         """
         return (trapzint(self.age_integrand, z, z_inf) /
-                self.hubble_constant).in_base(base_units=self.base_units)
+                self.hubble_constant).in_units(self.unit_system["time"])
 
     def critical_density(self, z):
         r"""
@@ -338,7 +338,7 @@
         return (3.0 / 8.0 / np.pi * 
                 self.hubble_constant**2 / G *
                 ((1 + z)**3.0 * self.omega_matter + 
-                 self.omega_lambda)).in_base(base_units=self.base_units)
+                 self.omega_lambda)).in_units(self.unit_system["density"])
 
     def hubble_parameter(self, z):
         r"""
@@ -356,7 +356,7 @@
         >>> print co.hubble_parameter(1.0).in_units("km/s/Mpc")
 
         """
-        return self.hubble_constant.in_base(base_units=self.base_units) * self.expansion_factor(z)
+        return self.hubble_constant.in_units(self.unit_system["frequency"]) * self.expansion_factor(z)
 
     def age_integrand(self, z):
         return (1 / (z + 1) / self.expansion_factor(z))
@@ -532,7 +532,7 @@
   
         my_time = t0 / self.hubble_constant
     
-        return my_time.in_base(base_units=self.base_units)
+        return my_time.in_units(self.unit_system["time"])
 
     _arr = None
     @property


https://bitbucket.org/yt_analysis/yt/commits/48bd6cf0f21f/
Changeset:   48bd6cf0f21f
Branch:      yt
User:        jzuhone
Date:        2015-12-02 19:53:06+00:00
Summary:     Avoiding a circular import
Affected #:  5 files

diff -r 3c4625e2e65f6b46df103f43314085f678013227 -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,9 +40,8 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
-from yt.units.unit_object import Unit
+from yt.units.unit_object import Unit, unit_system_registry
 from yt.units.unit_registry import UnitRegistry
-from yt.units.unit_systems import unit_system_registry
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.fields.fluid_fields import \

diff -r 3c4625e2e65f6b46df103f43314085f678013227 -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -31,7 +31,6 @@
 from yt.units.unit_registry import \
     UnitRegistry, \
     UnitParseError
-from yt.units.unit_systems import unit_system_registry
 from yt.utilities.exceptions import YTUnitsNotReducible
 
 import copy
@@ -52,6 +51,8 @@
     'sqrt': sqrt
 }
 
+unit_system_registry = {}
+
 def auto_positive_symbol(tokens, local_dict, global_dict):
     """
     Inserts calls to ``Symbol`` for undefined variables.

diff -r 3c4625e2e65f6b46df103f43314085f678013227 -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -13,9 +13,7 @@
 
 from yt.extern.six import string_types
 from yt.units import dimensions
-from yt.units.unit_object import Unit
-
-unit_system_registry = {}
+from yt.units.unit_object import Unit, unit_system_registry
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,

diff -r 3c4625e2e65f6b46df103f43314085f678013227 -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -30,10 +30,9 @@
     isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
     modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs
 
-from yt.units.unit_object import Unit, UnitParseError
+from yt.units.unit_object import Unit, UnitParseError, unit_system_registry
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless, current_mks, em_dimensions
-from yt.units.unit_systems import unit_system_registry
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
     YTUfuncUnitError, YTIterableUnitCoercionError, \

diff -r 3c4625e2e65f6b46df103f43314085f678013227 -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -24,7 +24,7 @@
 from yt.units.yt_array import \
      YTArray, \
      YTQuantity
-from yt.units.unit_systems import unit_system_registry
+from yt.units.unit_object import unit_system_registry
 
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs as G, \


https://bitbucket.org/yt_analysis/yt/commits/237a4a1d3511/
Changeset:   237a4a1d3511
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:03:02+00:00
Summary:     Fixing bugs. Need to add the unit_system kwarg to all of the frontends (sigh)
Affected #:  5 files

diff -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -176,3 +176,5 @@
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
+
+from yt.units.unit_systems import UnitSystem

diff -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -311,7 +311,8 @@
                  z_axis_decomp=False,
                  suppress_astropy_warnings=True,
                  parameters=None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
 
         if parameters is None:
             parameters = {}
@@ -417,7 +418,8 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, fn, dataset_type, units_override=units_override)
+        Dataset.__init__(self, fn, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -182,7 +182,8 @@
     def __init__(self, filename, dataset_type='flash_hdf5',
                  storage_filename = None,
                  particle_filename = None, 
-                 units_override = None):
+                 units_override = None,
+                 unit_system = "cgs"):
 
         self.fluid_types += ("flash",)
         if self._handle is not None: return
@@ -202,7 +203,8 @@
         # generalization.
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
         self.parameters["HydroMethod"] = 'flash' # always PPM DE

diff -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -18,7 +18,7 @@
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
                  temperature_unit, angle_unit, current_mks_unit=None,
-                 unit_registry=None):
+                 registry=None):
         self.units_map = {dimensions.length: length_unit,
                           dimensions.mass: mass_unit,
                           dimensions.time: time_unit,
@@ -26,7 +26,7 @@
                           dimensions.angle: angle_unit}
         if current_mks_unit is not None:
             self.units_map[dimensions.current_mks] = current_mks_unit
-        self.unit_registry = unit_registry
+        self.registry = registry
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
 
@@ -48,7 +48,7 @@
     def __setitem__(self, key, value):
         if isinstance(key, string_types):
             key = getattr(dimensions, key)
-        self.units_map[key] = Unit(value, unit_registry=self.unit_registry)
+        self.units_map[key] = Unit(value, registry=self.registry)
 
 def create_code_unit_system(ds):
     code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",

diff -r 48bd6cf0f21fffa3ecedaa0265882b4a044a4572 -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -130,7 +130,7 @@
 
 # MKS E&M units
 mu_0 = YTQuantity(4.0e-7*pi, "N/A**2")
-eps_0 = (1.0/(clight.in_mks()**2*mu_0)).in_units("C**2/N/m**2")
+eps_0 = (1.0/(clight**2*mu_0)).in_units("C**2/N/m**2")
 
 # Misc
 standard_gravity_cgs = YTQuantity(standard_gravity_cm_per_s2, "cm/s**2")


https://bitbucket.org/yt_analysis/yt/commits/66de7b31ebf9/
Changeset:   66de7b31ebf9
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:15:09+00:00
Summary:     Code unit system
Affected #:  3 files

diff -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d -r 66de7b31ebf9019058f22ce272e3156229971da1 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+from yt.units.unit_systems import create_code_unit_system
 from yt.data_objects.region_expression import \
     RegionExpression
 
@@ -235,6 +236,9 @@
         self._parse_parameter_file()
         self.set_units()
         self._setup_coordinate_handler()
+        create_code_unit_system(self)
+        if unit_system == "code":
+            unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 
         # Because we need an instantiated class to check the ds's existence in

diff -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d -r 66de7b31ebf9019058f22ce272e3156229971da1 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -421,8 +421,9 @@
             return yt_base_unit
         else:
             units_string = self._get_system_unit_string(unit_system.base_units)
+            u = Unit(units_string, registry=self.registry)
             base_value = get_conversion_factor(self, yt_base_unit)[0]
-            base_value /= get_conversion_factor(self, Unit(units_string))[0]
+            base_value /= get_conversion_factor(self, u)[0]
             return Unit(units_string, base_value=base_value,
                         dimensions=self.dimensions, registry=self.registry)
 

diff -r 237a4a1d3511e9cc3d3b54fef1b4ed252e1ff59d -r 66de7b31ebf9019058f22ce272e3156229971da1 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -52,7 +52,7 @@
 
 def create_code_unit_system(ds):
     code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
-                                  "code_temperature", "radian")
+                                  "code_temperature", "radian", registry=ds.unit_registry)
     code_unit_system[dimensions.velocity] = "code_velocity"
 
 cgs_unit_system = UnitSystem("cgs", "cm", "g", "s", "K", "radian")


https://bitbucket.org/yt_analysis/yt/commits/7dd1e8336cdd/
Changeset:   7dd1e8336cdd
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:29:24+00:00
Summary:     Need a convert_to_cgs here now just in case
Affected #:  1 file

diff -r 66de7b31ebf9019058f22ce272e3156229971da1 -r 7dd1e8336cdd930649e6bcd64154109e1875392a yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -135,6 +135,7 @@
             vol = chunk["cell_volume"].in_cgs().v
             EM = (chunk["density"]/mp).v**2
             EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+            EM.convert_to_cgs()
 
             if isinstance(self.Zmet, string_types):
                 metalZ = chunk[self.Zmet].v


https://bitbucket.org/yt_analysis/yt/commits/7ba3cc2210d8/
Changeset:   7ba3cc2210d8
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:29:38+00:00
Summary:     Adding unit_system to enzo
Affected #:  1 file

diff -r 7dd1e8336cdd930649e6bcd64154109e1875392a -r 7ba3cc2210d8531b0367b09ec52b76c35af5ffbc yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -665,7 +665,8 @@
                  parameter_override = None,
                  conversion_override = None,
                  storage_filename = None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         """
         This class is a stripped down class that simply reads and parses
         *filename* without looking at the index.  *dataset_type* gets passed
@@ -683,7 +684,7 @@
         self._conversion_override = conversion_override
         self.storage_filename = storage_filename
         Dataset.__init__(self, filename, dataset_type, file_style=file_style,
-                         units_override=units_override)
+                         units_override=units_override, unit_system=unit_system)
 
     def _setup_1d(self):
         self._index_class = EnzoHierarchy1D


https://bitbucket.org/yt_analysis/yt/commits/dbf7799c5352/
Changeset:   dbf7799c5352
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:32:56+00:00
Summary:     these weren't necessary anyway
Affected #:  1 file

diff -r 7ba3cc2210d8531b0367b09ec52b76c35af5ffbc -r dbf7799c5352972bd07c8b093c3e9621b6f5b255 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -710,7 +710,7 @@
             # setting plot aspect.
             if self.aspect is None:
                 self.aspect = float((self.ds.quan(1.0, unit_y) /
-                                     self.ds.quan(1.0, unit_x)).in_base())
+                                     self.ds.quan(1.0, unit_x)))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -1672,7 +1672,7 @@
         self._cb_size = 0.0375*fsize
         self._ax_text_size = [1.2*fontscale, 0.9*fontscale]
         self._top_buff_size = 0.30*fontscale
-        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2])).in_base()
+        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2]))
         self._unit_aspect = aspect
 
         size, axrect, caxrect = self._get_best_layout()


https://bitbucket.org/yt_analysis/yt/commits/667f55d702bc/
Changeset:   667f55d702bc
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:37:59+00:00
Summary:     Fixing these tests
Affected #:  1 file

diff -r dbf7799c5352972bd07c8b093c3e9621b6f5b255 -r 667f55d702bcf1f10023b6208537bfb1df6cea62 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -467,11 +467,10 @@
     yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
     yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
 
-    base_units = {'length':'kpc','time':'Myr','mass':'1.0e14*Msun'}
-    em3_converted = YTQuantity(1.545436840386756e-05, '100000000000000.0*Msun/(Myr**2*kpc)')
-    yield assert_equal, em3.in_base(base_units=base_units), em3
-    yield assert_array_almost_equal, em3.in_base(base_units=base_units), em3_converted
-    yield assert_equal, str(em3.in_base(base_units=base_units).units), '100000000000000.0*Msun/(Myr**2*kpc)'
+    em3_converted = YTQuantity(1545436840.386756, 'Msun/(Myr**2*kpc)')
+    yield assert_equal, em3.in_base(unit_system="galactic"), em3
+    yield assert_array_almost_equal, em3.in_base(unit_system="galactic"), em3_converted
+    yield assert_equal, str(em3.in_base(unit_system="galactic").units), 'Msun/(Myr**2*kpc)'
 
     dimless = YTQuantity(1.0, "")
     yield assert_equal, dimless.in_cgs(), dimless


https://bitbucket.org/yt_analysis/yt/commits/2a6850406964/
Changeset:   2a6850406964
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:44:58+00:00
Summary:     Adding unit_system to ytdata frontend
Affected #:  1 file

diff -r 667f55d702bcf1f10023b6208537bfb1df6cea62 -r 2a68504069641c1247e43b36d7217f9663e35440 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -158,11 +158,12 @@
     fluid_types = ("grid", "gas", "deposit", "index")
 
     def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
-                 n_ref = 16, over_refine_factor = 1, units_override=None):
+                 n_ref = 16, over_refine_factor = 1, units_override=None,
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(YTDataContainerDataset, self).__init__(filename, dataset_type,
-            units_override=units_override)
+            units_override=units_override, unit_system=unit_system)
 
     def _parse_parameter_file(self):
         super(YTDataContainerDataset, self)._parse_parameter_file()
@@ -343,8 +344,9 @@
     default_fluid_type = "grid"
     fluid_types = ("grid", "gas", "deposit", "index")
 
-    def __init__(self, filename):
-        super(YTGridDataset, self).__init__(filename, self._dataset_type)
+    def __init__(self, filename, unit_system="cgs"):
+        super(YTGridDataset, self).__init__(filename, self._dataset_type,
+                                            unit_system=unit_system)
         self.data = self.index.grids[0]
 
     def _parse_parameter_file(self):
@@ -585,8 +587,9 @@
 
 class YTProfileDataset(YTNonspatialDataset):
     """Dataset for saved profile objects."""
-    def __init__(self, filename):
-        super(YTProfileDataset, self).__init__(filename)
+    def __init__(self, filename, unit_system="cgs"):
+        super(YTProfileDataset, self).__init__(filename,
+                                               unit_system=unit_system)
 
     @property
     def profile(self):


https://bitbucket.org/yt_analysis/yt/commits/484dac773e6f/
Changeset:   484dac773e6f
Branch:      yt
User:        jzuhone
Date:        2015-12-02 20:52:07+00:00
Summary:     Put this back the way it was
Affected #:  1 file

diff -r 2a68504069641c1247e43b36d7217f9663e35440 -r 484dac773e6f4381b2c0dbf7ffb9561d7d2970be yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -302,7 +302,7 @@
     for field in my_profile:
         # Don't write code units because we might not know those later.
         if isinstance(my_profile[field], YTArray):
-            my_profile[field].convert_to_base()
+            my_profile[field].convert_to_cgs()
         _yt_array_hdf5(profile_group, str(field), my_profile[field])
     variance_storage = "%s_variance" % storage
     if hasattr(halo, variance_storage):
@@ -311,7 +311,7 @@
         for field in my_profile:
             # Don't write code units because we might not know those later.
             if isinstance(my_profile[field], YTArray):
-                my_profile[field].convert_to_base()
+                my_profile[field].convert_to_cgs()
             _yt_array_hdf5(variance_group, str(field), my_profile[field])
     fh.close()
 


https://bitbucket.org/yt_analysis/yt/commits/63d669c50bc0/
Changeset:   63d669c50bc0
Branch:      yt
User:        jzuhone
Date:        2015-12-02 21:14:41+00:00
Summary:     put these back for now
Affected #:  1 file

diff -r 484dac773e6f4381b2c0dbf7ffb9561d7d2970be -r 63d669c50bc0aba8b54014e9fe8f7539c2ef7c32 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -285,7 +285,7 @@
                     x = _get_position_array(ptype, f, "px")
                     y = _get_position_array(ptype, f, "py")
                     z = np.zeros(x.size, dtype="float64") + \
-                      self.ds.domain_left_edge[2].in_base().d
+                      self.ds.domain_left_edge[2].in_cgs().d
                     yield ptype, (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
@@ -302,7 +302,7 @@
                     x = _get_position_array(ptype, f, "px")
                     y = _get_position_array(ptype, f, "py")
                     z = np.zeros(all_count[ptype], dtype="float64") + \
-                      self.ds.domain_left_edge[2].in_base().d
+                      self.ds.domain_left_edge[2].in_cgs().d
                     mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
@@ -330,7 +330,7 @@
                 pos[:,0] = _get_position_array(ptype, f, "px")
                 pos[:,1] = _get_position_array(ptype, f, "py")
                 pos[:,2] = np.zeros(all_count[ptype], dtype="float64") + \
-                  self.ds.domain_left_edge[2].in_base().d
+                  self.ds.domain_left_edge[2].in_cgs().d
                 # These are 32 bit numbers, so we give a little lee-way.
                 # Otherwise, for big sets of particles, we often will bump into the
                 # domain edges.  This helps alleviate that.


https://bitbucket.org/yt_analysis/yt/commits/3dc4380095e9/
Changeset:   3dc4380095e9
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:46:02+00:00
Summary:     Small enhancements for the UnitSystem class
Affected #:  3 files

diff -r 63d669c50bc0aba8b54014e9fe8f7539c2ef7c32 -r 3dc4380095e9fab6cbb3b9435bf4f018749048d9 yt/units/dimensions.py
--- a/yt/units/dimensions.py
+++ b/yt/units/dimensions.py
@@ -30,6 +30,7 @@
 #
 
 rate = 1 / time
+frequency = rate
 
 velocity     = length / time
 acceleration = length / time**2

diff -r 63d669c50bc0aba8b54014e9fe8f7539c2ef7c32 -r 3dc4380095e9fab6cbb3b9435bf4f018749048d9 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -402,7 +402,7 @@
             return ""
         for factor in my_dims.as_ordered_factors():
             dim = list(factor.free_symbols)[0]
-            unit_string = base_units[dim]
+            unit_string = str(base_units[dim])
             if factor.is_Pow:
                 power_string = "**(%s)" % factor.as_base_exp()[1]
             else:

diff -r 63d669c50bc0aba8b54014e9fe8f7539c2ef7c32 -r 3dc4380095e9fab6cbb3b9435bf4f018749048d9 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -19,16 +19,18 @@
     def __init__(self, name, length_unit, mass_unit, time_unit,
                  temperature_unit, angle_unit, current_mks_unit=None,
                  registry=None):
-        self.units_map = {dimensions.length: length_unit,
-                          dimensions.mass: mass_unit,
-                          dimensions.time: time_unit,
-                          dimensions.temperature: temperature_unit,
-                          dimensions.angle: angle_unit}
+        self.registry = registry
+        self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry),
+                          dimensions.mass: Unit(mass_unit, registry=self.registry),
+                          dimensions.time: Unit(time_unit, registry=self.registry),
+                          dimensions.temperature: Unit(temperature_unit, registry=self.registry),
+                          dimensions.angle: Unit(angle_unit, registry=self.registry)}
         if current_mks_unit is not None:
-            self.units_map[dimensions.current_mks] = current_mks_unit
+            self.units_map[dimensions.current_mks] = Unit(current_mks_unit, registry=self.registry)
         self.registry = registry
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
+        self.name = name
 
     def __getitem__(self, key):
         if isinstance(key, string_types):
@@ -50,6 +52,9 @@
             key = getattr(dimensions, key)
         self.units_map[key] = Unit(value, registry=self.registry)
 
+    def __str__(self):
+        return self.name
+
 def create_code_unit_system(ds):
     code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
                                   "code_temperature", "radian", registry=ds.unit_registry)


https://bitbucket.org/yt_analysis/yt/commits/eff29029a9ce/
Changeset:   eff29029a9ce
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:46:35+00:00
Summary:     adding unit systems to vector operations
Affected #:  1 file

diff -r 3dc4380095e9fab6cbb3b9435bf4f018749048d9 -r eff29029a9cee3d427bcd00861d20e712096c040 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -68,6 +68,7 @@
 
 def create_vector_fields(registry, basename, field_units,
                          ftype="gas", slice_info=None):
+    from yt.units.unit_object import Unit
     # slice_info would be the left, the right, and the factor.
     # For example, with the old Enzo-ZEUS fields, this would be:
     # slice(None, -2, None)
@@ -217,14 +218,17 @@
     def _divergence_abs(field, data):
         return np.abs(data[ftype, "%s_divergence" % basename])
 
+    field_units = Unit(field_units, registry=registry.ds.unit_registry)
+    div_units = field_units / registry.ds.unit_system["length"]
+
     registry.add_field((ftype, "%s_divergence" % basename),
                        function=_divergence,
-                       units="1/s",
+                       units=div_units,
                        validators=[ValidateSpatial(1)])
     
     registry.add_field((ftype, "%s_divergence_absolute" % basename),
                        function=_divergence_abs,
-                       units="1/s")
+                       units=div_units)
 
     def _tangential_over_magnitude(field, data):
         tr = data[ftype, "tangential_%s" % basename] / \


https://bitbucket.org/yt_analysis/yt/commits/bbde5edae67e/
Changeset:   bbde5edae67e
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:46:52+00:00
Summary:     Adding unit systems to astro and cosmology fields
Affected #:  2 files

diff -r eff29029a9cee3d427bcd00861d20e712096c040 -r bbde5edae67e6608256f3fdaaa2e9c999a0b8f57 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -32,6 +32,7 @@
 
 @register_field_plugin
 def setup_astro_fields(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     # slice_info would be the left, the right, and the factor.
     # For example, with the old Enzo-ZEUS fields, this would be:
     # slice(None, -2, None)
@@ -53,7 +54,7 @@
 
     registry.add_field((ftype, "dynamical_time"),
                        function=_dynamical_time,
-                       units="s")
+                       units=unit_system["time"])
 
     def _jeans_mass(field, data):
         MJ_constant = (((5.0 * kboltz) / (G * mh)) ** (1.5)) * \
@@ -66,7 +67,7 @@
 
     registry.add_field((ftype, "jeans_mass"),
                        function=_jeans_mass,
-                       units="g")
+                       units=unit_system["mass"])
 
     def _chandra_emissivity(field, data):
         logT0 = np.log10(data[ftype, "temperature"].to_ndarray().astype(np.float64)) - 7
@@ -103,7 +104,7 @@
     
     registry.add_field((ftype, "emission_measure"),
                        function=_emission_measure,
-                       units="cm**-3")
+                       units=unit_system["number_density"])
 
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
@@ -134,7 +135,7 @@
 
     registry.add_field((ftype, "sz_kinetic"),
                        function=_sz_kinetic,
-                       units="1/cm",
+                       units=unit_system["length"]**-1,
                        validators=[ValidateParameter("axis")])
 
     def _szy(field, data):
@@ -143,4 +144,4 @@
 
     registry.add_field((ftype, "szy"),
                        function=_szy,
-                       units="1/cm")
+                       units=unit_system["length"]**-1)

diff -r eff29029a9cee3d427bcd00861d20e712096c040 -r bbde5edae67e6608256f3fdaaa2e9c999a0b8f57 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -27,6 +27,7 @@
 
 @register_field_plugin
 def setup_cosmology_fields(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     # slice_info would be the left, the right, and the factor.
     # For example, with the old Enzo-ZEUS fields, this would be:
     # slice(None, -2, None)
@@ -46,14 +47,14 @@
 
     registry.add_field((ftype, "matter_density"),
                        function=_matter_density,
-                       units="g/cm**3")
+                       units=unit_system["density"])
 
     def _matter_mass(field, data):
         return data[ftype, "matter_density"] * data["index", "cell_volume"]
 
     registry.add_field((ftype, "matter_mass"),
                        function=_matter_mass,
-                       units="g")
+                       units=unit_system["mass"])
 
     # rho_total / rho_cr(z).
     def _overdensity(field, data):
@@ -138,6 +139,6 @@
 
     registry.add_field((ftype, "weak_lensing_convergence"),
                        function=_weak_lensing_convergence,
-                       units="1/cm",
+                       units=unit_system["length"]**-1,
         validators=[ValidateParameter("observer_redshift"),
                     ValidateParameter("source_redshift")])


https://bitbucket.org/yt_analysis/yt/commits/4c9389161697/
Changeset:   4c9389161697
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:47:16+00:00
Summary:     Adding vector operations to angular momentum and fluid vector fields
Affected #:  2 files

diff -r bbde5edae67e6608256f3fdaaa2e9c999a0b8f57 -r 4c93891616970add5247028d00aa262db7f7c5cd yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -37,6 +37,7 @@
 
 @register_field_plugin
 def setup_angular_momentum(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     def _specific_angular_momentum_x(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         rv = obtain_rvec(data)
@@ -60,26 +61,26 @@
 
     registry.add_field((ftype, "specific_angular_momentum_x"),
                         function=_specific_angular_momentum_x,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_y"),
                         function=_specific_angular_momentum_y,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_z"),
                         function=_specific_angular_momentum_z,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "specific_angular_momentum",
-                           "cm**2 / s", ftype=ftype)
+                           unit_system["specific_angular_momentum"], ftype=ftype)
 
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
     registry.add_field((ftype, "angular_momentum_x"),
                        function=_angular_momentum_x,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_y(field, data):
@@ -87,7 +88,7 @@
              * data[ftype, "specific_angular_momentum_y"]
     registry.add_field((ftype, "angular_momentum_y"),
                        function=_angular_momentum_y,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_z(field, data):
@@ -95,8 +96,8 @@
              * data[ftype, "specific_angular_momentum_z"]
     registry.add_field((ftype, "angular_momentum_z"),
                        function=_angular_momentum_z,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     create_magnitude_field(registry, "angular_momentum",
-                           "g * cm**2 / s", ftype=ftype)
+                           unit_system["angular_momentum"], ftype=ftype)

diff -r bbde5edae67e6608256f3fdaaa2e9c999a0b8f57 -r 4c93891616970add5247028d00aa262db7f7c5cd yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -30,6 +30,7 @@
 
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     # slice_info would be the left, the right, and the factor.
     # For example, with the old Enzo-ZEUS fields, this would be:
     # slice(None, -2, None)
@@ -68,9 +69,10 @@
         n = "baroclinic_vorticity_%s" % ax
         registry.add_field((ftype, n), function=eval("_%s" % n),
                            validators=bv_validators,
-                           units="s**(-2)")
+                           units=unit_system["frequency"]**2)
 
-    create_magnitude_field(registry, "baroclinic_vorticity", "s**(-2)",
+    create_magnitude_field(registry, "baroclinic_vorticity", 
+                           unit_system["frequency"]**2,
                            ftype=ftype, slice_info=slice_info,
                            validators=bv_validators)
 
@@ -119,12 +121,12 @@
         n = "vorticity_%s" % ax
         registry.add_field((ftype, n),
                            function=eval("_%s" % n),
-                           units="1/s",
+                           units=unit_system["frequency"],
                            validators=vort_validators)
-    create_magnitude_field(registry, "vorticity", "1/s",
+    create_magnitude_field(registry, "vorticity", unit_system["frequency"],
                            ftype=ftype, slice_info=slice_info,
                            validators=vort_validators)
-    create_squared_field(registry, "vorticity", "1/s**2",
+    create_squared_field(registry, "vorticity", unit_system["frequency"]**2,
                          ftype=ftype, slice_info=slice_info,
                          validators=vort_validators)
 
@@ -138,10 +140,11 @@
         n = "vorticity_stretching_%s" % ax
         registry.add_field((ftype, n), 
                            function=eval("_%s" % n),
-                           units = "s**(-2)",
+                           units = unit_system["frequency"]**2,
                            validators=vort_validators)
 
-    create_magnitude_field(registry, "vorticity_stretching", "s**(-2)",
+    create_magnitude_field(registry, "vorticity_stretching", 
+                           unit_system["frequency"]**2,
                            ftype=ftype, slice_info=slice_info,
                            validators=vort_validators)
 
@@ -158,7 +161,7 @@
         n = "vorticity_growth_%s" % ax
         registry.add_field((ftype, n),
                            function=eval("_%s" % n),
-                           units="s**(-2)",
+                           units=unit_system["frequency"]**2,
                            validators=vort_validators)
 
     def _vorticity_growth_magnitude(field, data):
@@ -174,7 +177,7 @@
     
     registry.add_field((ftype, "vorticity_growth_magnitude"),
               function=_vorticity_growth_magnitude,
-              units="s**(-2)",
+              units=unit_system["frequency"]**2,
               validators=vort_validators,
               take_log=False)
 
@@ -185,7 +188,7 @@
     
     registry.add_field((ftype, "vorticity_growth_magnitude_absolute"),
                        function=_vorticity_growth_magnitude_absolute,
-                       units="s**(-2)",
+                       units=unit_system["frequency"]**2,
                        validators=vort_validators)
 
     def _vorticity_growth_timescale(field, data):
@@ -196,7 +199,7 @@
     
     registry.add_field((ftype, "vorticity_growth_timescale"),
                        function=_vorticity_growth_timescale,
-                       units="s",
+                       units=unit_system["time"],
                        validators=vort_validators)
 
     ########################################################################
@@ -231,10 +234,11 @@
         n = "vorticity_radiation_pressure_%s" % ax
         registry.add_field((ftype, n),
                            function=eval("_%s" % n),
-                           units="s**(-2)",
+                           units=unit_system["frequency"]**2,
                            validators=vrp_validators)
 
-    create_magnitude_field(registry, "vorticity_radiation_pressure", "s**(-2)",
+    create_magnitude_field(registry, "vorticity_radiation_pressure",
+                           unit_system["frequency"]**2,
                            ftype=ftype, slice_info=slice_info,
                            validators=vrp_validators)
 
@@ -255,7 +259,7 @@
         n = "vorticity_radiation_pressure_growth_%s" % ax
         registry.add_field((ftype, n),
                            function=eval("_%s" % n),
-                           units="s**(-2)",
+                           units=unit_system["frequency"]**2,
                            validators=vrp_validators)
 
     def _vorticity_radiation_pressure_growth_magnitude(field, data):
@@ -271,7 +275,7 @@
     
     registry.add_field((ftype, "vorticity_radiation_pressure_growth_magnitude"),
                        function=_vorticity_radiation_pressure_growth_magnitude,
-                       units="s**(-2)",
+                       units=unit_system["frequency"]**2,
                        validators=vrp_validators,
                        take_log=False)
 
@@ -296,7 +300,7 @@
     
     registry.add_field((ftype, "vorticity_radiation_pressure_growth_timescale"),
                        function=_vorticity_radiation_pressure_growth_timescale,
-                       units="s",
+                       units=unit_system["time"],
                        validators=vrp_validators)
 
     def _shear(field, data):
@@ -346,11 +350,11 @@
                         [(ftype, "velocity_x"),
                          (ftype, "velocity_y"),
                          (ftype, "velocity_z")])],
-                        units=r"1/s")
+                        units=unit_system["frequency"])
 
     def _shear_criterion(field, data):
         """
-        Divide by c_s to leave shear in units of cm**-1, which 
+        Divide by c_s to leave shear in units of length**-1, which 
         can be compared against the inverse of the local cell size (1/dx) 
         to determine if refinement should occur.
         """
@@ -359,7 +363,7 @@
 
     registry.add_field((ftype, "shear_criterion"),
                        function=_shear_criterion,
-                       units="1/cm",
+                       units=unit_system["length"]**-1,
                        validators=[ValidateSpatial(1,
                         [(ftype, "sound_speed"),
                          (ftype, "velocity_x"),


https://bitbucket.org/yt_analysis/yt/commits/056ccf5d803a/
Changeset:   056ccf5d803a
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:47:31+00:00
Summary:     adding unit systems to magnetic fields
Affected #:  1 file

diff -r 4c93891616970add5247028d00aa262db7f7c5cd -r 056ccf5d803ac1a217b497bc4062e62988aeccee yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -27,23 +27,24 @@
 
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
+    from yt.utilities.physical_constants import mu_0
+    unit_system = registry.ds.unit_system
+    if str(unit_system) == "mks":
+        mag_units = unit_system["magnetic_field_mks"]
+        mag_fac = 1.0/mu_0
+    else:
+        mag_fac = 1.0/(4.0*np.pi)
+        mag_units = unit_system["magnetic_field_cgs"]
+
     def _magnetic_energy(field,data):
-        """This assumes that your front end has provided Bx, By, Bz in
-        units of Gauss. If you use MKS, make sure to write your own
-        magnetic_energy field to deal with non-unitary \mu_0.
-        """
-        return (data[ftype,"magnetic_field_x"]**2 +
-                data[ftype,"magnetic_field_y"]**2 +
-                data[ftype,"magnetic_field_z"]**2)/(8*np.pi)
+        return 0.5*mag_fac*(data[ftype,"magnetic_field_x"]**2 +
+                            data[ftype,"magnetic_field_y"]**2 +
+                            data[ftype,"magnetic_field_z"]**2)
     registry.add_field((ftype, "magnetic_energy"),
              function=_magnetic_energy,
-             units="erg / cm**3")
+             units=unit_system["pressure"])
 
     def _plasma_beta(field,data):
-        """This assumes that your front end has provided Bx, By, Bz in
-        units of Gauss. If you use MKS, make sure to write your own
-        plasma_beta field to deal with non-unitary \mu_0.
-        """
         return data[ftype,'pressure']/data[ftype,'magnetic_energy']
     registry.add_field((ftype, "plasma_beta"),
              function=_plasma_beta,
@@ -53,17 +54,13 @@
         return data[ftype,'magnetic_energy']
     registry.add_field((ftype, "magnetic_pressure"),
              function=_magnetic_pressure,
-             units="erg / cm**3")
+             units=unit_system["pressure"])
 
     def _magnetic_field_strength(field,data):
-        """This assumes that your front end has provided Bx, By, Bz in
-        units of Gauss. If you use MKS, make sure to write your own
-        PlasmaBeta field to deal with non-unitary \mu_0.
-        """
-        return np.sqrt(8.*np.pi*data[ftype,"magnetic_energy"])
+        return np.sqrt(2.0*data[ftype,"magnetic_energy"]/mag_fac)
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
-                       units = "gauss")
+                       units = mag_units)
 
     def _magnetic_field_poloidal(field,data):
         normal = data.get_field_parameter("normal")
@@ -81,7 +78,7 @@
 
     registry.add_field((ftype, "magnetic_field_poloidal"),
              function=_magnetic_field_poloidal,
-             units="gauss",
+             units=mag_units,
              validators=[ValidateParameter("normal")])
 
     def _magnetic_field_toroidal(field,data):
@@ -98,17 +95,13 @@
 
     registry.add_field((ftype, "magnetic_field_toroidal"),
              function=_magnetic_field_toroidal,
-             units="gauss",
+             units=mag_units,
              validators=[ValidateParameter("normal")])
 
     def _alfven_speed(field,data):
-        """This assumes that your front end has provided Bx, By, Bz in
-        units of Gauss. If you use MKS, make sure to write your own
-        alfven_speed field to deal with non-unitary \mu_0.
-        """
-        return data[ftype,'magnetic_field_strength']/np.sqrt(4.*np.pi*data[ftype,'density'])
+        return data[ftype,'magnetic_field_strength']*np.sqrt(mag_fac/data[ftype,'density'])
     registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
-                       units="cm/s")
+                       units=unit_system["velocity"])
 
     def _mach_alfven(field,data):
         return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']


https://bitbucket.org/yt_analysis/yt/commits/744b97b074ed/
Changeset:   744b97b074ed
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:47:53+00:00
Summary:     adding unit systems to fluid and geometric fields
Affected #:  2 files

diff -r 056ccf5d803ac1a217b497bc4062e62988aeccee -r 744b97b074ed1019d391c077bd1f4fd1918b64ba yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -49,21 +49,23 @@
     else:
         sl_left, sl_right, div_fac = slice_info
 
-    create_vector_fields(registry, "velocity", "cm / s", ftype, slice_info)
+    unit_system = registry.ds.unit_system
+
+    create_vector_fields(registry, "velocity", unit_system["velocity"], ftype, slice_info)
 
     def _cell_mass(field, data):
         return data[ftype, "density"] * data[ftype, "cell_volume"]
 
     registry.add_field((ftype, "cell_mass"),
         function=_cell_mass,
-        units="g")
+        units=unit_system["mass"])
 
     def _sound_speed(field, data):
         tr = data.ds.gamma * data[ftype, "pressure"] / data[ftype, "density"]
         return np.sqrt(tr)
     registry.add_field((ftype, "sound_speed"),
              function=_sound_speed,
-             units="cm/s")
+             units=unit_system["velocity"])
 
     def _radial_mach_number(field, data):
         """ Radial component of M{|v|/c_sound} """
@@ -79,7 +81,7 @@
                                               + data[ftype, "velocity_z"]**2.0 )
     registry.add_field((ftype, "kinetic_energy"),
                        function = _kin_energy,
-                       units = "erg / cm**3")
+                       units = unit_system["pressure"])
 
     def _mach_number(field, data):
         """ M{|v|/c_sound} """
@@ -100,7 +102,7 @@
 
     registry.add_field((ftype, "courant_time_step"),
              function=_courant_time_step,
-             units="s")
+             units=unit_system["time"])
 
     def _pressure(field, data):
         """ M{(Gamma-1.0)*rho*E} """
@@ -110,7 +112,7 @@
 
     registry.add_field((ftype, "pressure"),
              function=_pressure,
-             units="dyne/cm**2")
+             units=unit_system["pressure"])
 
     def _kT(field, data):
         return (kboltz*data[ftype, "temperature"]).in_units("keV")
@@ -143,7 +145,7 @@
         return data[ftype, "metal_density"] * data[ftype, "cell_volume"]
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
-                       units="g")
+                       units=unit_system["mass"])
 
     def _number_density(field, data):
         field_data = np.zeros_like(data["gas", "%s_number_density" % \
@@ -153,7 +155,7 @@
         return field_data
     registry.add_field((ftype, "number_density"),
                        function = _number_density,
-                       units="cm**-3")
+                       units=unit_system["number_density"])
     
     def _mean_molecular_weight(field, data):
         return (data[ftype, "density"] / (mh * data[ftype, "number_density"]))
@@ -161,17 +163,18 @@
               function=_mean_molecular_weight,
               units="")
 
-    setup_gradient_fields(registry, (ftype, "pressure"), "dyne/cm**2",
+    setup_gradient_fields(registry, (ftype, "pressure"), unit_system["pressure"],
                           slice_info)
 
-    setup_gradient_fields(registry, (ftype, "density"), "g / cm**3",
+    setup_gradient_fields(registry, (ftype, "density"), unit_system["density"],
                           slice_info)
 
-    create_averaged_field(registry, "density", "g/cm**3",
+    create_averaged_field(registry, "density", unit_system["density"],
                           ftype=ftype, slice_info=slice_info,
                           weight="cell_mass")
 
 def setup_gradient_fields(registry, grad_field, field_units, slice_info = None):
+    from yt.units.unit_object import Unit
     assert(isinstance(grad_field, tuple))
     ftype, fname = grad_field
     if slice_info is None:
@@ -197,10 +200,8 @@
             return new_field
         return func
 
-    if field_units != "":
-        grad_units = "(%s) / cm" % field_units
-    else:
-        grad_units = "1 / cm"
+    field_units = Unit(field_units, registry=registry.ds.unit_registry)
+    grad_units = field_units / registry.ds.unit_system["length"]
 
     for axi, ax in enumerate('xyz'):
         f = grad_func(axi, ax)

diff -r 056ccf5d803ac1a217b497bc4062e62988aeccee -r 744b97b074ed1019d391c077bd1f4fd1918b64ba yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -35,7 +35,7 @@
 
 @register_field_plugin
 def setup_geometric_fields(registry, ftype="gas", slice_info=None):
-
+    unit_system = registry.ds.unit_system
     def _radius(field, data):
         """The spherical radius component of the mesh cells.
 
@@ -47,7 +47,7 @@
     registry.add_field(("index", "radius"),
                        function=_radius,
                        validators=[ValidateParameter("center")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _grid_level(field, data):
         """The AMR refinement level"""
@@ -75,7 +75,7 @@
 
     registry.add_field(("index", "ones_over_dx"),
                        function=_ones_over_dx,
-                       units="1 / cm",
+                       units=unit_system["length"]**-1,
                        display_field=False)
 
     def _zeros(field, data):
@@ -107,12 +107,12 @@
         parameter.
         """
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
+        return data.ds.arr(get_sph_r(coords), "code_length").in_units(unit_system["length"])
 
     registry.add_field(("index", "spherical_radius"),
                        function=_spherical_radius,
                        validators=[ValidateParameter("center")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _spherical_r(field, data):
         """This field is deprecated and will be removed in a future release"""
@@ -121,7 +121,7 @@
     registry.add_field(("index", "spherical_r"),
                        function=_spherical_r,
                        validators=[ValidateParameter("center")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _spherical_theta(field, data):
         """The spherical theta component of the positions of the mesh cells.
@@ -169,13 +169,13 @@
         """
         normal = data.get_field_parameter("normal")
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
+        return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_units(unit_system["length"])
 
     registry.add_field(("index", "cylindrical_radius"),
                        function=_cylindrical_radius,
                        validators=[ValidateParameter("center"),
                                    ValidateParameter("normal")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _cylindrical_r(field, data):
         """This field is deprecated and will be removed in a future release"""
@@ -184,7 +184,7 @@
     registry.add_field(("index", "cylindrical_r"),
                        function=_cylindrical_r,
                        validators=[ValidateParameter("center")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _cylindrical_z(field, data):
         """The cylindrical z component of the positions of the mesh cells.
@@ -194,13 +194,13 @@
         """
         normal = data.get_field_parameter("normal")
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
+        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_units(unit_system["length"])
 
     registry.add_field(("index", "cylindrical_z"),
                        function=_cylindrical_z,
                        validators=[ValidateParameter("center"),
                                    ValidateParameter("normal")],
-                       units="cm")
+                       units=unit_system["length"])
 
     def _cylindrical_theta(field, data):
         """The cylindrical z component of the positions of the mesh cells.
@@ -241,5 +241,5 @@
                        function=_height,
                        validators=[ValidateParameter("center"),
                                    ValidateParameter("normal")],
-                       units="cm",
+                       units=unit_system["length"],
                        display_field=False)


https://bitbucket.org/yt_analysis/yt/commits/842ef150214f/
Changeset:   842ef150214f
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:49:18+00:00
Summary:     adding unit systems to particle and species fields
Affected #:  2 files

diff -r 744b97b074ed1019d391c077bd1f4fd1918b64ba -r 842ef150214fb70cf69c7197ad30c1c0c0362b3e yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -93,6 +93,7 @@
     return _AllFields
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+    unit_system = registry.ds.unit_system
     orig = set(registry.keys())
     ptype_dn = ptype.replace("_"," ").title()
     def particle_count(field, data):
@@ -118,7 +119,7 @@
              function = particle_mass,
              validators = [ValidateSpatial()],
              display_name = r"\mathrm{%s Mass}" % ptype_dn,
-             units = "g")
+             units = unit_system["mass"])
 
     def particle_density(field, data):
         pos = data[ptype, coord_name].convert_to_units("code_length")
@@ -132,7 +133,7 @@
              function = particle_density,
              validators = [ValidateSpatial()],
              display_name = r"\mathrm{%s Density}" % ptype_dn,
-             units = "g/cm**3")
+             units = unit_system["density"])
 
     def particle_cic(field, data):
         pos = data[ptype, coord_name]
@@ -145,7 +146,7 @@
              function = particle_cic,
              validators = [ValidateSpatial()],
              display_name = r"\mathrm{%s CIC Density}" % ptype_dn,
-             units = "g/cm**3")
+             units = unit_system["density"])
 
     def _get_density_weighted_deposit_field(fname, units, method):
         def _deposit_field(field, data):
@@ -171,7 +172,7 @@
                 "particle_velocity_%s" % ax, "cm/s", method)
             registry.add_field(
                 ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
-                function=function, units="cm/s", take_log=False,
+                function=function, units=unit_system["velocity"], take_log=False,
                 validators=[ValidateSpatial(0)])
 
     # Now some translation functions.
@@ -227,6 +228,8 @@
 
 def particle_vector_functions(ptype, coord_names, vel_names, registry):
 
+    unit_system = registry.ds.unit_system
+    
     # This will column_stack a set of scalars to create vector fields.
 
     def _get_vec_func(_ptype, names):
@@ -242,7 +245,7 @@
                        particle_type=True)
     registry.add_field((ptype, "particle_velocity"),
                        function=_get_vec_func(ptype, vel_names),
-                       units = "cm / s",
+                       units = unit_system["velocity"],
                        particle_type=True)
 
 def get_angular_momentum_components(ptype, data, spos, svel):
@@ -258,6 +261,7 @@
 def standard_particle_fields(registry, ptype,
                              spos = "particle_position_%s",
                              svel = "particle_velocity_%s"):
+    unit_system = registry.ds.unit_system
     # This function will set things up based on the scalar fields and standard
     # yt field names.
     # data.get_field_parameter("bulk_velocity") defaults to YTArray([0,0,0] cm/s)
@@ -273,7 +277,7 @@
                   function=_particle_velocity_magnitude,
                   particle_type=True,
                   take_log=False,
-                  units="cm/s")
+                  units=unit_system["velocity"])
 
     def _particle_specific_angular_momentum(field, data):
         """
@@ -291,7 +295,7 @@
     registry.add_field((ptype, "particle_specific_angular_momentum"),
               function=_particle_specific_angular_momentum,
               particle_type=True,
-              units="cm**2/s",
+              units=unit_system["specific_angular_momentum"],
               validators=[ValidateParameter("center")])
 
     def _get_spec_ang_mom_comp(axi, ax, _ptype):
@@ -306,11 +310,11 @@
         f, v = _get_spec_ang_mom_comp(axi, ax, ptype)
         registry.add_field(
             (ptype, "particle_specific_angular_momentum_%s" % ax),
-            particle_type = True, function=f, units="cm**2/s",
+            particle_type = True, function=f, units=unit_system["specific_angular_momentum"],
             validators=[ValidateParameter("center")]
         )
         registry.add_field((ptype, "particle_angular_momentum_%s" % ax),
-            function=v, units="g*cm**2/s", particle_type=True,
+            function=v, units=unit_system["angular_momentum"], particle_type=True,
             validators=[ValidateParameter('center')])
 
     def _particle_angular_momentum(field, data):
@@ -320,11 +324,12 @@
     registry.add_field((ptype, "particle_angular_momentum"),
               function=_particle_angular_momentum,
               particle_type=True,
-              units="g*cm**2/s",
+              units=unit_system["angular_momentum"],
               validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "particle_angular_momentum",
-                           "g*cm**2/s", ftype=ptype, particle_type=True)
+                           unit_system["angular_momentum"], 
+                           ftype=ptype, particle_type=True)
 
     def _particle_radius(field, data):
         """The spherical radius component of the particle positions
@@ -337,7 +342,7 @@
     registry.add_field(
         (ptype, "particle_radius"),
         function=_particle_radius,
-        units="cm",
+        units=unit_system["length"],
         particle_type=True,
         validators=[ValidateParameter("center")])
 
@@ -359,7 +364,7 @@
         (ptype, "particle_position_relative"),
         function=_particle_position_relative,
         particle_type=True,
-        units="cm",
+        units=unit_system["length"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_velocity_relative(field, data):
@@ -379,7 +384,7 @@
 
     registry.add_field((ptype, "particle_velocity_relative"),
               function=_particle_velocity_relative,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -404,7 +409,7 @@
     # consistent naming
     registry.add_field((ptype, "particle_position_spherical_radius"),
               function=_particle_radius,
-              particle_type=True, units="cm",
+              particle_type=True, units=unit_system["length"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -414,7 +419,7 @@
 
     registry.add_field((ptype, "particle_spherical_position_radius"),
               function=_particle_spherical_position_radius,
-              particle_type=True, units="cm",
+              particle_type=True, units=unit_system["length"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -497,7 +502,7 @@
 
     registry.add_field((ptype, "particle_velocity_spherical_radius"),
               function=_particle_velocity_spherical_radius,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -507,7 +512,7 @@
 
     registry.add_field((ptype, "particle_spherical_velocity_radius"),
               function=_particle_spherical_velocity_radius,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -515,7 +520,7 @@
     # "particle_radial_velocity" for convenience
     registry.add_field((ptype, "particle_radial_velocity"),
               function=_particle_spherical_velocity_radius,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -542,7 +547,7 @@
         (ptype, "particle_velocity_spherical_theta"),
         function=_particle_velocity_spherical_theta,
         particle_type=True,
-        units="cm/s",
+        units=unit_system["velocity"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_spherical_velocity_theta(field, data):
@@ -551,7 +556,7 @@
 
     registry.add_field((ptype, "particle_spherical_velocity_theta"),
               function=_particle_spherical_velocity_theta,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -576,7 +581,7 @@
         (ptype, "particle_velocity_spherical_phi"),
         function=_particle_velocity_spherical_phi,
         particle_type=True,
-        units="cm/s",
+        units=unit_system["velocity"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_spherical_velocity_phi(field, data):
@@ -585,7 +590,7 @@
 
     registry.add_field((ptype, "particle_spherical_velocity_phi"),
               function=_particle_spherical_velocity_phi,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -605,7 +610,7 @@
     registry.add_field(
         (ptype, "particle_position_cylindrical_radius"),
         function=_particle_position_cylindrical_radius,
-        units="cm",
+        units=unit_system["length"],
         particle_type=True,
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
@@ -644,7 +649,7 @@
     registry.add_field(
         (ptype, "particle_position_cylindrical_z"),
         function=_particle_position_cylindrical_z,
-        units="cm",
+        units=unit_system["length"],
         particle_type=True,
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
@@ -669,7 +674,7 @@
         (ptype, "particle_velocity_cylindrical_radius"),
         function=_particle_velocity_cylindrical_radius,
         particle_type=True,
-        units="cm/s",
+        units=unit_system["velocity"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_velocity_cylindrical_theta(field, data):
@@ -693,7 +698,7 @@
         (ptype, "particle_velocity_cylindrical_theta"),
         function=_particle_velocity_cylindrical_theta,
         particle_type=True,
-        units="cm/s",
+        units=unit_system["velocity"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_cylindrical_velocity_theta(field, data):
@@ -726,7 +731,7 @@
         (ptype, "particle_velocity_cylindrical_z"),
         function=_particle_velocity_cylindrical_z,
         particle_type=True,
-        units="cm/s",
+        units=unit_system["velocity"],
         validators=[ValidateParameter("normal"), ValidateParameter("center")])
 
     def _particle_cylindrical_velocity_z(field, data):
@@ -735,7 +740,7 @@
 
     registry.add_field((ptype, "particle_cylindrical_velocity_z"),
               function=_particle_cylindrical_velocity_z,
-              particle_type=True, units="cm/s",
+              particle_type=True, units=unit_system["velocity"],
               validators=[ValidateParameter("normal"),
                           ValidateParameter("center")])
 
@@ -765,6 +770,7 @@
 def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
         smoothing_length_name, density_name, smoothed_field, registry,
         nneighbors = None, kernel_name = 'cubic'):
+    unit_system = registry.ds.unit_system
     if kernel_name == 'cubic':
         field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
     else:
@@ -774,8 +780,8 @@
     def _vol_weight(field, data):
         pos = data[ptype, coord_name]
         pos = pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name].in_cgs()
-        dens = data[ptype, density_name].in_cgs()
+        mass = data[ptype, mass_name].in_units(unit_system["mass"])
+        dens = data[ptype, density_name].in_units(unit_system["density"])
         quan = data[ptype, smoothed_field]
         quan = quan.convert_to_units(field_units)
 
@@ -800,7 +806,7 @@
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
         # This should be used when seeking a non-normalized value:
-        rv /= hsml.uq**3 / hsml.uq.in_cgs().uq**3
+        rv /= hsml.uq**3 / hsml.uq.in_units(unit_system["length"]).uq**3
         rv = data.apply_units(rv, field_units)
         return rv
     registry.add_field(field_name, function = _vol_weight,

diff -r 744b97b074ed1019d391c077bd1f4fd1918b64ba -r 842ef150214fb70cf69c7197ad30c1c0c0362b3e yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -73,6 +73,8 @@
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_density" already exists and refers to mass density.
     """
+    unit_system = registry.ds.unit_system
+
     registry.add_field((ftype, "%s_fraction" % species), 
                        function = _create_fraction_func(ftype, species),
                        particle_type = particle_type,
@@ -81,12 +83,12 @@
     registry.add_field((ftype, "%s_mass" % species),
                        function = _create_mass_func(ftype, species),
                        particle_type = particle_type,
-                       units = "g")
+                       units = unit_system["mass"])
 
     registry.add_field((ftype, "%s_number_density" % species),
                        function = _create_number_density_func(ftype, species),
                        particle_type = particle_type,
-                       units = "cm**-3")
+                       units = unit_system["number_density"])
 
 def add_species_field_by_fraction(registry, ftype, species, 
                                   particle_type = False):
@@ -95,35 +97,38 @@
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_fraction" already exists and refers to mass fraction.
     """
+    unit_system = registry.ds.unit_system
+
     registry.add_field((ftype, "%s_density" % species), 
                        function = _create_density_func(ftype, species),
                        particle_type = particle_type,
-                       units = "g/cm**3")
+                       units = unit_system["density"])
 
     registry.add_field((ftype, "%s_mass" % species),
                        function = _create_mass_func(ftype, species),
                        particle_type = particle_type,
-                       units = "g")
+                       units = unit_system["mass"])
 
     registry.add_field((ftype, "%s_number_density" % species),
                        function = _create_number_density_func(ftype, species),
                        particle_type = particle_type,
-                       units = "cm**-3")
+                       units = unit_system["number_density"])
 
 def add_nuclei_density_fields(registry, ftype,
                               particle_type = False):
+    unit_system = registry.ds.unit_system
     elements = _get_all_elements(registry.species_names)
     for element in elements:
         registry.add_field((ftype, "%s_nuclei_density" % element),
                            function = _nuclei_density,
                            particle_type = particle_type,
-                           units = "cm**-3")
+                           units = unit_system["number_density"])
     if len(elements) == 0:
         for element in ["H", "He"]:
             registry.add_field((ftype, "%s_nuclei_density" % element),
                                function = _default_nuclei_density,
                                particle_type = particle_type,
-                               units = "cm**-3")
+                               units = unit_system["number_density"])
 
 def _default_nuclei_density(field, data):
     element = field.name[1][:field.name[1].find("_")]


https://bitbucket.org/yt_analysis/yt/commits/907715657ef3/
Changeset:   907715657ef3
Branch:      yt
User:        jzuhone
Date:        2015-12-02 22:55:25+00:00
Summary:     Adding unit_system to the art, artio, and athena frontends.
Affected #:  3 files

diff -r 842ef150214fb70cf69c7197ad30c1c0c0362b3e -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -162,7 +162,7 @@
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
                  file_particle_data=None, file_particle_stars=None,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -181,7 +181,8 @@
         self.force_max_level = force_max_level
         self.spread_age = spread_age
         Dataset.__init__(self, filename, dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
     def _find_files(self, file_amr):
@@ -419,7 +420,8 @@
                           skip_particles=False, skip_stars=False,
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
-                 file_particle_stars=None):
+                 file_particle_stars=None, units_override=None,
+                 unit_system="cgs"):
         self.over_refine_factor = 1
         self.n_ref = 64
         self.particle_types += ("all",)
@@ -433,7 +435,9 @@
         self.parameter_filename = filename
         self.skip_stars = skip_stars
         self.spread_age = spread_age
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
     def _find_files(self, file_particle):

diff -r 842ef150214fb70cf69c7197ad30c1c0c0362b3e -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -318,7 +318,7 @@
 
     def __init__(self, filename, dataset_type='artio',
                  storage_filename=None, max_range = 1024,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         from sys import version
         if self._handle is not None:
             return
@@ -333,7 +333,8 @@
         self.artio_parameters = self._handle.parameters
         # Here we want to initiate a traceback, if the reader is not built.
         Dataset.__init__(self, filename, dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r 842ef150214fb70cf69c7197ad30c1c0c0362b3e -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -447,7 +447,7 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None, nprocs=1):
+                 units_override=None, nprocs=1, unit_system="cgs"):
         self.fluid_types += ("athena",)
         self.nprocs = nprocs
         if parameters is None:
@@ -464,7 +464,8 @@
                                   "and will be removed in a future release. Use units_override instead.")
                     already_warned = True
                 units_override[k] = self.specified_parameters.pop(k)
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
         self.filename = filename
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]


https://bitbucket.org/yt_analysis/yt/commits/d995e5b56e7c/
Changeset:   d995e5b56e7c
Branch:      yt
User:        jzuhone
Date:        2015-12-02 23:06:17+00:00
Summary:     Adding unit system to boxlib, chombo, gadget, gadget_fof, gdf, halo_catalog, http_stream, and tipsy frontends.
Affected #:  8 files

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -369,7 +369,8 @@
                  fparam_filename="probin",
                  dataset_type='boxlib_native',
                  storage_filename=None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -384,7 +385,8 @@
         self.storage_filename = storage_filename
 
         Dataset.__init__(self, output_dir, dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
 
         # These are still used in a few places.
         if "HydroMethod" not in self.parameters.keys():
@@ -726,11 +728,13 @@
                  fparam_filename="probin",
                  dataset_type='orion_native',
                  storage_filename=None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
 
         BoxlibDataset.__init__(self, output_dir,
                                cparam_filename, fparam_filename,
-                               dataset_type, units_override=units_override)
+                               dataset_type, units_override=units_override,
+                               unit_system=unit_system)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -246,7 +246,7 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
         self.dataset_type = dataset_type
@@ -255,7 +255,8 @@
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
         Dataset.__init__(self, filename, self.dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
         self.cosmological_simulation = False
 
@@ -446,11 +447,12 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
 
         ChomboDataset.__init__(self, filename, dataset_type, 
                                storage_filename, ini_filename,
-                               units_override=units_override)
+                               units_override=units_override,
+                               unit_system=unit_system)
 
     def _parse_parameter_file(self):
         """

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -78,7 +78,8 @@
                  header_spec = "default",
                  field_spec = "default",
                  ptype_spec = "default",
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -105,7 +106,7 @@
         if units_override is not None:
             raise RuntimeError("units_override is not supported for GadgetDataset. "+
                                "Use unit_base instead.")
-        super(GadgetDataset, self).__init__(filename, dataset_type)
+        super(GadgetDataset, self).__init__(filename, dataset_type, unit_system=unit_system)
         if self.cosmological_simulation:
             self.time_unit.convert_to_units('s/h')
             self.length_unit.convert_to_units('kpccm/h')
@@ -342,7 +343,8 @@
                  unit_base = None, n_ref=64,
                  over_refine_factor=1,
                  bounding_box = None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         self.storage_filename = None
         filename = os.path.abspath(filename)
         if units_override is not None:
@@ -351,7 +353,7 @@
         super(GadgetHDF5Dataset, self).__init__(
             filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
             over_refine_factor=over_refine_factor,
-            bounding_box = bounding_box)
+            bounding_box = bounding_box, unit_system=unit_system)
 
     def _get_hvals(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -109,7 +109,7 @@
 
     def __init__(self, filename, dataset_type="gadget_fof_hdf5",
                  n_ref=16, over_refine_factor=1,
-                 unit_base=None, units_override=None):
+                 unit_base=None, units_override=None, unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
@@ -121,7 +121,8 @@
             raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+
                                "Use unit_base instead.")
         super(GadgetFOFDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
+                                               units_override=units_override,
+                                               unit_system=unit_system)
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -174,10 +174,11 @@
 
     def __init__(self, filename, dataset_type='grid_data_format',
                  storage_filename=None, geometry=None,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         self.geometry = geometry
         self.fluid_types += ("gdf",)
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override, unit_system=unit_system)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -44,11 +44,13 @@
     _suffix = ".h5"
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
-                 n_ref = 16, over_refine_factor = 1, units_override=None):
+                 n_ref = 16, over_refine_factor = 1, units_override=None,
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(HaloCatalogDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
+                                                 units_override=units_override,
+                                                 unit_system=unit_system)
 
     def _parse_parameter_file(self):
         with h5py.File(self.parameter_filename, "r") as f:

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/http_stream/data_structures.py
--- a/yt/frontends/http_stream/data_structures.py
+++ b/yt/frontends/http_stream/data_structures.py
@@ -47,13 +47,15 @@
     
     def __init__(self, base_url,
                  dataset_type = "http_particle_stream",
-                 n_ref = 64, over_refine_factor=1):
+                 n_ref = 64, over_refine_factor=1, 
+                 unit_system="cgs"):
         if requests is None:
             raise RuntimeError
         self.base_url = base_url
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(HTTPStreamDataset, self).__init__("", dataset_type)
+        super(HTTPStreamDataset, self).__init__("", dataset_type, 
+                                                unit_system=unit_system)
 
     def __repr__(self):
         return self.base_url

diff -r 907715657ef3e09eceb7c66c1a4a0b70440426d0 -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -71,7 +71,8 @@
                  cosmology_parameters=None,
                  n_ref=64, over_refine_factor=1,
                  bounding_box=None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
         # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge
@@ -108,7 +109,8 @@
         if units_override is not None:
             raise RuntimeError("units_override is not supported for TipsyDataset. "+
                                "Use unit_base instead.")
-        super(TipsyDataset, self).__init__(filename, dataset_type)
+        super(TipsyDataset, self).__init__(filename, dataset_type,
+                                           unit_system=unit_system)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename)


https://bitbucket.org/yt_analysis/yt/commits/4553c6e3c381/
Changeset:   4553c6e3c381
Branch:      yt
User:        jzuhone
Date:        2015-12-02 23:11:18+00:00
Summary:     Adding unit systems to moab, owls, owls_subfind, ramses, rockstar, and sdf frontends.
Affected #:  5 files

diff -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 -r 4553c6e3c381cba187e0a46dedd31a97218256c7 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -65,10 +65,12 @@
     periodicity = (False, False, False)
 
     def __init__(self, filename, dataset_type='moab_hex8',
-                 storage_filename = None, units_override=None):
+                 storage_filename = None, units_override=None,
+                 unit_system="cgs"):
         self.fluid_types += ("moab",)
         Dataset.__init__(self, filename, dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
         self.filename = filename
         self._handle = HDF5FileHandler(filename)
@@ -145,12 +147,14 @@
     periodicity = (False, False, False)
 
     def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
-                 storage_filename = None, units_override=None):
+                 storage_filename = None, units_override=None,
+                 unit_system="cgs"):
         self.fluid_types += ("pyne",)
         filename = "pyne_mesh_" + str(id(pyne_mesh))
         self.pyne_mesh = pyne_mesh
         Dataset.__init__(self, str(filename), dataset_type,
-                         units_override=units_override)
+                         units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 -r 4553c6e3c381cba187e0a46dedd31a97218256c7 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -105,11 +105,13 @@
     _suffix = ".hdf5"
 
     def __init__(self, filename, dataset_type="subfind_hdf5",
-                 n_ref = 16, over_refine_factor = 1, units_override=None):
+                 n_ref = 16, over_refine_factor = 1, units_override=None,
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(OWLSSubfindDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
+                                                 units_override=units_override,
+                                                 unit_system=unit_system)
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 -r 4553c6e3c381cba187e0a46dedd31a97218256c7 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -520,7 +520,7 @@
     
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
@@ -530,7 +530,8 @@
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
         self.storage_filename = storage_filename
 
     def __repr__(self):

diff -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 -r 4553c6e3c381cba187e0a46dedd31a97218256c7 yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -51,11 +51,12 @@
 
     def __init__(self, filename, dataset_type="rockstar_binary",
                  n_ref = 16, over_refine_factor = 1,
-                 units_override=None):
+                 units_override=None, unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(RockstarDataset, self).__init__(filename, dataset_type,
-                                              units_override=units_override)
+                                              units_override=units_override,
+                                              unit_system=unit_system)
 
     def _parse_parameter_file(self):
         with open(self.parameter_filename, "rb") as f:

diff -r d995e5b56e7cd9ced7764b1bcfb3dbf99ba87e11 -r 4553c6e3c381cba187e0a46dedd31a97218256c7 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -75,7 +75,8 @@
                  midx_header = None,
                  midx_level = None,
                  field_map = None,
-                 units_override=None):
+                 units_override=None,
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
@@ -101,7 +102,8 @@
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type,
-                                         units_override=units_override)
+                                         units_override=units_override,
+                                         unit_system=unit_system)
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):


https://bitbucket.org/yt_analysis/yt/commits/fe847f38620a/
Changeset:   fe847f38620a
Branch:      yt
User:        jzuhone
Date:        2015-12-02 23:22:11+00:00
Summary:     Adding unit systems to the stream dataset
Affected #:  1 file

diff -r 4553c6e3c381cba187e0a46dedd31a97218256c7 -r fe847f38620aaa459d2bc37f68cf5557a0c57093 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -288,7 +288,7 @@
     _dataset_type = 'stream'
 
     def __init__(self, stream_handler, storage_filename=None,
-                 geometry="cartesian"):
+                 geometry="cartesian", unit_system="cgs"):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
@@ -299,7 +299,8 @@
         name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
         from yt.data_objects.static_output import _cached_datasets
         _cached_datasets[name] = self
-        Dataset.__init__(self, name, self._dataset_type)
+        Dataset.__init__(self, name, self._dataset_type, 
+                         unit_system=unit_system)
 
     def _parse_parameter_file(self):
         self.basename = self.stream_handler.name
@@ -521,7 +522,7 @@
                       nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
                       velocity_unit=None, magnetic_unit=None,
                       periodicity=(True, True, True),
-                      geometry="cartesian"):
+                      geometry="cartesian", unit_system="cgs"):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -682,7 +683,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamDataset(handler, geometry = geometry)
+    sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
 
     check_fields = [("io", "particle_position_x"), ("io", "particle_position")]
 
@@ -709,7 +710,7 @@
                    bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
                    magnetic_unit=None, periodicity=(True, True, True),
-                   geometry="cartesian", refine_by=2):
+                   geometry="cartesian", refine_by=2, unit_system="cgs"):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -863,7 +864,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamDataset(handler, geometry=geometry)
+    sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)
     return sds
 
 
@@ -1003,7 +1004,8 @@
                    sim_time=0.0, mass_unit = None, time_unit = None,
                    velocity_unit=None, magnetic_unit=None,
                    periodicity=(True, True, True),
-                   n_ref = 64, over_refine_factor = 1, geometry = "cartesian"):
+                   n_ref = 64, over_refine_factor = 1, geometry = "cartesian",
+                   unit_system="cgs"):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -1122,7 +1124,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamParticlesDataset(handler, geometry=geometry)
+    sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system)
     sds.n_ref = n_ref
     sds.over_refine_factor = over_refine_factor
 
@@ -1231,7 +1233,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(True, True, True),
-                         geometry = "cartesian"):
+                         geometry = "cartesian", unit_system="cgs"):
     r"""Load a hexahedral mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1348,7 +1350,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamHexahedralDataset(handler, geometry = geometry)
+    sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system)
 
     return sds
 
@@ -1460,7 +1462,8 @@
                 mass_unit=None, time_unit=None,
                 velocity_unit=None, magnetic_unit=None,
                 periodicity=(True, True, True),
-                over_refine_factor = 1, partial_coverage = 1):
+                over_refine_factor = 1, partial_coverage = 1,
+                unit_system="cgs"):
     r"""Load an octree mask into yt.
 
     Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1561,7 +1564,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamOctreeDataset(handler)
+    sds = StreamOctreeDataset(handler, unit_system=unit_system)
     sds.octree_mask = octree_mask
     sds.partial_coverage = partial_coverage
     sds.over_refine_factor = over_refine_factor
@@ -1608,7 +1611,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(False, False, False),
-                         geometry = "cartesian"):
+                         geometry = "cartesian", unit_system="cgs"):
     r"""Load an unstructured mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1731,7 +1734,8 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    sds = StreamUnstructuredMeshDataset(handler, geometry = geometry)
+    sds = StreamUnstructuredMeshDataset(handler, geometry=geometry,
+                                        unit_system=unit_system)
 
     return sds
 


https://bitbucket.org/yt_analysis/yt/commits/67197d74e632/
Changeset:   67197d74e632
Branch:      yt
User:        jzuhone
Date:        2015-12-02 23:29:22+00:00
Summary:     adding unit systems to flash and athena fields
Affected #:  2 files

diff -r fe847f38620aaa459d2bc37f68cf5557a0c57093 -r 67197d74e6323657f57eba973c727feb9ca398f2 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -42,6 +42,7 @@
 # case that the file contains the conservative ones.
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         # Add velocity fields
         for comp in "xyz":
             vel_field = ("athena", "velocity_%s" % comp)
@@ -49,12 +50,12 @@
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
                 self.alias(("gas","velocity_%s" % comp), vel_field,
-                           units="cm/s")
+                           units=unit_system["velocity"])
             elif mom_field in self.field_list:
                 self.add_output_field(mom_field,
                                       units="code_mass/code_time/code_length**2")
                 self.add_field(("gas","velocity_%s" % comp),
-                               function=velocity_field(comp), units = "cm/s")
+                               function=velocity_field(comp), units = unit_system["velocity"])
         # Add pressure, energy, and temperature fields
         def ekin1(data):
             return 0.5*(data["athena","momentum_x"]**2 +
@@ -84,35 +85,35 @@
             self.add_output_field(("athena","pressure"),
                                   units=pres_units)
             self.alias(("gas","pressure"),("athena","pressure"),
-                       units="dyne/cm**2")
+                       units=unit_system["pressure"])
             def _thermal_energy(field, data):
                 return data["athena","pressure"] / \
                        (data.ds.gamma-1.)/data["athena","density"]
             self.add_field(("gas","thermal_energy"),
                            function=_thermal_energy,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
             def _total_energy(field, data):
                 return etot_from_pres(data)/data["athena","density"]
             self.add_field(("gas","total_energy"),
                            function=_total_energy,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
         elif ("athena","total_energy") in self.field_list:
             self.add_output_field(("athena","total_energy"),
                                   units=pres_units)
             def _pressure(field, data):
                 return eint_from_etot(data)*(data.ds.gamma-1.0)
             self.add_field(("gas","pressure"), function=_pressure,
-                           units="dyne/cm**2")
+                           units=unit_system["pressure"])
             def _thermal_energy(field, data):
                 return eint_from_etot(data)/data["athena","density"]
             self.add_field(("gas","thermal_energy"),
                            function=_thermal_energy,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
             def _total_energy(field, data):
                 return data["athena","total_energy"]/data["athena","density"]
             self.add_field(("gas","total_energy"),
                            function=_total_energy,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
 
         def _temperature(field, data):
             if data.has_field_parameter("mu"):
@@ -121,4 +122,4 @@
                 mu = 0.6
             return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz
         self.add_field(("gas","temperature"), function=_temperature,
-                       units="K")
+                       units=unit_system["temperature"])

diff -r fe847f38620aaa459d2bc37f68cf5557a0c57093 -r 67197d74e6323657f57eba973c727feb9ca398f2 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -96,6 +96,7 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         for i in range(1, 1000):
             self.add_output_field(("flash", "r{0:03}".format(i)), 
                 units = "",
@@ -112,7 +113,7 @@
             self.add_output_field(("flash","ener"),
                                   units="code_length**2/code_time**2")
             self.alias(("gas","total_energy"),("flash","ener"),
-                       units="erg/g")
+                       units=unit_system["specific_energy"])
         else:
             def _ener(field, data):
                 ener = data["flash","eint"]+ekin(data)
@@ -122,12 +123,12 @@
                     pass
                 return ener
             self.add_field(("gas","total_energy"), function=_ener,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
         if ("flash","eint") in self.field_list:
             self.add_output_field(("flash","eint"),
                                   units="code_length**2/code_time**2")
             self.alias(("gas","thermal_energy"),("flash","eint"),
-                       units="erg/g")
+                       units=unit_system["specific_energy"])
         else:
             def _eint(field, data):
                 eint = data["flash","ener"]-ekin(data)
@@ -137,7 +138,7 @@
                     pass
                 return eint
             self.add_field(("gas","thermal_energy"), function=_eint,
-                           units="erg/g")
+                           units=unit_system["specific_energy"])
         ## Derived FLASH Fields
         def _nele(field, data):
             Na_code = data.ds.quan(Na, '1/code_mass')
@@ -154,9 +155,9 @@
             except:
                 return 1.0/data["flash","sumy"]
         self.add_field(("flash","abar"), function=_abar, units="1")
-        def _number_density(fields,data) :
+        def _number_density(fields,data):
             return (data["nele"]+data["nion"])
         self.add_field(("gas","number_density"), function=_number_density,
-                       units="cm**-3")
+                       units=unit_system["number_density"])
 
 


https://bitbucket.org/yt_analysis/yt/commits/5aa0bc66c4ec/
Changeset:   5aa0bc66c4ec
Branch:      yt
User:        jzuhone
Date:        2015-12-02 23:37:38+00:00
Summary:     Adding unit systems to art, artio, ramses fields.
Affected #:  3 files

diff -r 67197d74e6323657f57eba973c727feb9ca398f2 -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -57,6 +57,7 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         def _temperature(field, data):
             r0 = data.ds.parameters['boxh'] / data.ds.parameters['ng']
             tr = data.ds.quan(3.03e5 * r0**2, 'K/code_velocity**2')
@@ -66,7 +67,7 @@
             return tr * data['art', 'GasEnergy'] / data['art', 'Density']
         self.add_field(('gas', 'temperature'),
                        function=_temperature, 
-                       units='K')
+                       units=unit_system["temperature"])
 
         def _get_vel(axis):
             def velocity(field, data):
@@ -76,7 +77,7 @@
         for ax in 'xyz':
             self.add_field(('gas','velocity_%s' % ax),
                            function = _get_vel(ax),
-                           units='cm/s')
+                           units=unit_system["velocity"])
 
         def _momentum_magnitude(field, data):
             tr = (data['gas','momentum_x']**2 +
@@ -86,7 +87,7 @@
             return tr
         self.add_field(('gas', 'momentum_magnitude'),
                        function=_momentum_magnitude,
-                       units='g*cm/s')
+                       units=unit_system["momentum"])
 
         def _velocity_magnitude(field, data):
             tr = data['gas','momentum_magnitude']
@@ -94,7 +95,7 @@
             return tr
         self.add_field(('gas', 'velocity_magnitude'),
                        function=_velocity_magnitude,
-                       units='cm/s')
+                       units=unit_system["velocity"])
 
         def _metal_density(field, data):
             tr = data['gas','metal_ia_density']
@@ -102,7 +103,7 @@
             return tr
         self.add_field(('gas','metal_density'),
                        function=_metal_density,
-                       units='g/cm**3')
+                       units=unit_system["density"])
 
         def _metal_mass_fraction(field, data):
             tr = data['gas','metal_density']

diff -r 67197d74e6323657f57eba973c727feb9ca398f2 -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -67,6 +67,7 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         def _get_vel(axis):
             def velocity(field, data):
                 return data["momentum_%s" % axis]/data["density"]
@@ -74,7 +75,7 @@
         for ax in 'xyz':
             self.add_field(("gas", "velocity_%s" % ax),
                            function = _get_vel(ax),
-                           units = "cm/s")
+                           units = unit_system["velocity"])
 
         def _temperature(field, data):
             tr = data["thermal_energy"]/data["density"]
@@ -97,7 +98,7 @@
         # it was set as:
         # unit_T = unit_v**2.0*mb / constants.k
         self.add_field(("gas", "temperature"), function = _temperature,
-                       units = "K")
+                       units = unit_system["temperature"])
 
         # Create a metal_density field as sum of existing metal fields. 
         flag1 = ("artio", "HVAR_METAL_DENSITY_Ia") in self.field_list
@@ -118,7 +119,7 @@
                     return tr
             self.add_field(("gas","metal_density"),
                            function=_metal_density,
-                           units="g/cm**3",
+                           units=unit_system["density"],
                            take_log=True)
 
     def setup_particle_fields(self, ptype):

diff -r 67197d74e6323657f57eba973c727feb9ca398f2 -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -90,7 +90,7 @@
             rv *= mass_hydrogen_cgs/boltzmann_constant_cgs
             return rv
         self.add_field(("gas", "temperature"), function=_temperature,
-                        units="K")
+                        units=self.ds.unit_system["temperature"])
         self.create_cooling_fields()
 
     def create_cooling_fields(self):


https://bitbucket.org/yt_analysis/yt/commits/3c4dfe4d53aa/
Changeset:   3c4dfe4d53aa
Branch:      yt
User:        jzuhone
Date:        2015-12-03 01:41:07+00:00
Summary:     Adding unit system to the rest of the frontend fields
Affected #:  5 files

diff -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -91,6 +91,7 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         # Now, let's figure out what fields are included.
         if any(f[1] == "xmom" for f in self.field_list):
             self.setup_momentum_to_velocity()
@@ -98,14 +99,14 @@
             self.setup_velocity_to_momentum()
         self.add_field(("gas", "thermal_energy"),
                        function=_thermal_energy,
-                       units="erg/g")
+                       units=unit_system["specific_energy"])
         self.add_field(("gas", "thermal_energy_density"),
                        function=_thermal_energy_density,
-                       units="erg/cm**3")
+                       units=unit_system["pressure"])
         if ("gas", "temperature") not in self.field_aliases:
             self.add_field(("gas", "temperature"),
                            function=_temperature,
-                           units="K")
+                           units=unit_system["temperature"])
 
     def setup_momentum_to_velocity(self):
         def _get_vel(axis):
@@ -115,7 +116,7 @@
         for ax in 'xyz':
             self.add_field(("gas", "velocity_%s" % ax),
                            function=_get_vel(ax),
-                           units="cm/s")
+                           units=self.ds.unit_system["velocity"])
 
     def setup_velocity_to_momentum(self):
         def _get_mom(axis):
@@ -179,7 +180,7 @@
                 func = _create_density_func(("gas", "%s_fraction" % nice_name))
                 self.add_field(name=("gas", "%s_density" % nice_name),
                                function = func,
-                               units = "g/cm**3")
+                               units = self.ds.unit_system["density"])
                 # We know this will either have one letter, or two.
                 if field[3] in string.ascii_letters:
                     element, weight = field[2:4], field[4:-1]
@@ -247,13 +248,14 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         # pick the correct temperature field
         if self.ds.parameters["use_tfromp"]:
             self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
-                       units="K")
+                       units=unit_system["temperature"])
         else:
             self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
-                       units="K")
+                       units=unit_system["temperature"])
 
         # Add X's and omegadots, units of 1/s
         for _, field in self.ds.field_list:
@@ -270,7 +272,7 @@
                 func = _create_density_func(("gas", "%s_fraction" % nice_name))
                 self.add_field(name=("gas", "%s_density" % nice_name),
                                function=func,
-                               units="g/cm**3",
+                               units=unit_system["density"],
                                display_name=r'\rho %s' % tex_label)
 
                 # Most of the time our species will be of the form
@@ -291,10 +293,10 @@
                 nice_name, tex_label = _nice_species_name(field)
                 display_name = r'\dot{\omega}\left[%s\right]' % tex_label
                 # Overwrite field to use nicer tex_label'ed display_name
-                self.add_output_field(("boxlib", field), units="1/s",
+                self.add_output_field(("boxlib", field), units=unit_system["rate"],
                                       display_name=display_name)
                 self.alias(("gas", "%s_creation_rate" % nice_name),
-                           ("boxlib", field), units="1/s")
+                           ("boxlib", field), units=unit_system["rate"])
 
 
 def _nice_species_name(field):

diff -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -79,6 +79,7 @@
     )
 
     def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
         def _thermal_energy_density(field, data):
             try:
                 return data['energy-density'] - data['kinetic_energy_density'] - \
@@ -123,27 +124,27 @@
 
         for ax in 'xyz':
             self.add_field(("gas", "velocity_%s" % ax), function = _get_vel(ax),
-                           units = "cm/s")
+                           units = unit_system["velocity"])
         self.add_field(("gas", "thermal_energy"),
                        function = _thermal_energy,
-                       units = "erg/g")
+                       units = unit_system["specific_energy"])
         self.add_field(("gas", "thermal_energy_density"),
                        function = _thermal_energy_density,
-                       units = "erg/cm**3")
+                       units = unit_system["pressure"])
         self.add_field(("gas", "kinetic_energy"),
                        function = _kinetic_energy,
-                       units = "erg/g")
+                       units = unit_system["specific_energy"])
         self.add_field(("gas", "kinetic_energy_density"),
                        function = _kinetic_energy_density,
-                       units = "erg/cm**3")
+                       units = unit_system["pressure"])
         self.add_field(("gas", "magnetic_energy"),
                        function = _magnetic_energy,
-                       units = "erg/g")
+                       units = unit_system["specific_energy"])
         self.add_field(("gas", "magnetic_energy_density"),
                        function = _magnetic_energy_density,
-                       units = "erg/cm**3")
+                       units = unit_system["pressure"])
         self.add_field(("gas", "temperature"), function=_temperature,
-                       units="K")
+                       units=unit_system["temperature"])
 
 
 class ChomboPICFieldInfo3D(FieldInfoContainer):

diff -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -140,7 +140,7 @@
             return data["Electron_Density"] * (me/mp)
         self.add_field(("gas", "El_density"),
                        function = _electron_density,
-                       units = "g/cm**3")
+                       units = self.ds.unit_registry["density"])
         for sp in species_names:
             self.add_species_field(sp)
             self.species_names.append(known_species_names[sp])
@@ -158,6 +158,7 @@
         self.setup_energy_field()
 
     def setup_energy_field(self):
+        unit_system = self.ds.unit_system
         # We check which type of field we need, and then we add it.
         ge_name = None
         te_name = None
@@ -192,7 +193,7 @@
             self.alias(
                 ("gas", "thermal_energy"),
                 ("enzo", ge_name),
-                units = "erg/g")
+                units = unit_system["specific_energy"])
         elif hydro_method in (4, 6):
             self.add_output_field(
                 ("enzo", te_name),
@@ -208,7 +209,7 @@
                 return ret
             self.add_field(
                 ("gas", "thermal_energy"),
-                function=_sub_b, units = "erg/g")
+                function=_sub_b, units = unit_system["specific_energy"])
         else: # Otherwise, we assume TotalEnergy is kinetic+thermal
             self.add_output_field(
                 ("enzo", te_name),
@@ -224,14 +225,14 @@
             self.add_field(
                 ("gas", "thermal_energy"),
                 function = _tot_minus_kin,
-                units = "erg/g")
+                units = unit_system["specific_energy"])
         if multi_species == 0 and 'Mu' in params:
             def _number_density(field, data):
                 return data['gas', 'density']/(mp*params['Mu'])
             self.add_field(
                 ("gas", "number_density"),
                 function = _number_density,
-                units="1/cm**3")
+                units=unit_system["number_density"])
 
     def setup_particle_fields(self, ptype):
 

diff -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -66,7 +66,7 @@
             self.add_field( (ptype, metal_name+"_density"),
                             function=_Density_wrap(i), 
                             particle_type=True,
-                            units="g/cm**3")
+                            units=self.ds.unit_system["density"])
 
     def setup_gas_particle_fields(self, ptype):
         if (ptype, "ElectronAbundance") in self.ds.field_list:
@@ -77,7 +77,7 @@
                 a_e = data[ptype, 'ElectronAbundance']
                 mu = 4.0 / (3.0 * x_H + 1.0 + 4.0 * x_H * a_e)
                 ret = data[ptype, "InternalEnergy"]*(gamma-1)*mu*mp/kb
-                return ret.in_units('K')
+                return ret.in_units(self.ds.unit_system["temperature"])
         else:
             def _temperature(field, data):
                 # Assume cosmic abundances
@@ -86,13 +86,13 @@
                 # Assume zero ionization
                 mu = 4.0 / (3.0 * x_H + 1.0)
                 ret = data[ptype, "InternalEnergy"]*(gamma-1)*mu*mp/kb
-                return ret.in_units('K')
+                return ret.in_units(self.ds.unit_system["temperature"])
 
         self.add_field(
             (ptype, "Temperature"),
             function=_temperature,
             particle_type=True,
-            units="K")
+            units=self.ds.unit_system["temperature"])
 
         # For now, we hardcode num_neighbors.  We should make this configurable
         # in the future.

diff -r 5aa0bc66c4ecfd4d914475d6169ac99abcf4345d -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 yt/frontends/owls/fields.py
--- a/yt/frontends/owls/fields.py
+++ b/yt/frontends/owls/fields.py
@@ -211,7 +211,7 @@
             dens_func = self._create_ion_density_func( ftype, ion )
             self.add_field( (ftype, fname),
                             function = dens_func, 
-                            units="g/cm**3",
+                            units=self.ds.unit_system["density"],
                             particle_type=True )            
             self._show_field_errors.append( (ftype,fname) )
 


https://bitbucket.org/yt_analysis/yt/commits/067c6297e75b/
Changeset:   067c6297e75b
Branch:      yt
User:        jzuhone
Date:        2015-12-03 03:17:46+00:00
Summary:     Fixing bugs
Affected #:  2 files

diff -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 -r 067c6297e75bd1279b8e69c736fc1adfeec7e571 yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -11,8 +11,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.extern.six import string_types
 import weakref
-import types
 
 from yt.utilities.exceptions import YTDimensionalityError
 
@@ -31,11 +31,11 @@
         # At first, we will only implement this as accepting a slice that is
         # (optionally) unitful corresponding to a specific set of coordinates
         # that result in a rectangular prism or a slice.
-        if isinstance(item, types.StringTypes):
+        if isinstance(item, string_types):
             # This is some field; we will instead pass this back to the
             # all_data object.
             return self.all_data[item]
-        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+        if isinstance(item, tuple) and isinstance(item[1], string_types):
             return self.all_data[item]
         if len(item) != self.ds.dimensionality:
             # Not the right specification, and we don't want to do anything

diff -r 3c4dfe4d53aa798c5e0b0bd4d69d0b0a4d63c6d4 -r 067c6297e75bd1279b8e69c736fc1adfeec7e571 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -140,7 +140,7 @@
             return data["Electron_Density"] * (me/mp)
         self.add_field(("gas", "El_density"),
                        function = _electron_density,
-                       units = self.ds.unit_registry["density"])
+                       units = self.ds.unit_system["density"])
         for sp in species_names:
             self.add_species_field(sp)
             self.species_names.append(known_species_names[sp])


https://bitbucket.org/yt_analysis/yt/commits/d426f52748f7/
Changeset:   d426f52748f7
Branch:      yt
User:        jzuhone
Date:        2015-12-03 03:18:16+00:00
Summary:     Replacing "cm" with unit_registry["length"] in a few places
Affected #:  3 files

diff -r 067c6297e75bd1279b8e69c736fc1adfeec7e571 -r d426f52748f7061b679f7dfaea547044943da324 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -19,8 +19,9 @@
     obtain_rvec
 
 def get_radius(data, field_prefix):
-    center = data.get_field_parameter("center").in_units("cm")
-    DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units("cm")
+    unit_system = data.ds.unit_system
+    center = data.get_field_parameter("center").in_units(unit_system["length"])
+    DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units(unit_system["length"])
     # This is in cm**2 so it can be the destination for our r later.
     radius2 = data.ds.arr(np.zeros(data[field_prefix+"x"].shape,
                          dtype='float64'), 'cm**2')
@@ -30,7 +31,7 @@
     for i, ax in enumerate('xyz'):
         # This will coerce the units, so we don't need to worry that we copied
         # it from a cm**2 array.
-        np.subtract(data["%s%s" % (field_prefix, ax)].in_units("cm"),
+        np.subtract(data["%s%s" % (field_prefix, ax)].in_units(unit_system["length"]),
                     center[i], r)
         if data.ds.periodicity[i] is True:
             np.abs(r, r)

diff -r 067c6297e75bd1279b8e69c736fc1adfeec7e571 -r d426f52748f7061b679f7dfaea547044943da324 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -201,7 +201,7 @@
 
     if method == "integrate":
         if weight is None:
-            dl = width[2].in_units("cm")
+            dl = width[2].in_units(data_source.ds.unit_system["length"])
             image *= dl
         else:
             image[:,:,0] /= image[:,:,1]

diff -r 067c6297e75bd1279b8e69c736fc1adfeec7e571 -r d426f52748f7061b679f7dfaea547044943da324 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -1877,7 +1877,7 @@
         dl = 1.0
         if self.method == "integrate":
             if self.weight is None:
-                dl = self.width[2].in_units("cm")
+                dl = self.width[2].in_units(ds.unit_system["length"])
             else:
                 image[:, : ,0] /= image[:, :, 1]
 


https://bitbucket.org/yt_analysis/yt/commits/544282e00cde/
Changeset:   544282e00cde
Branch:      yt
User:        jzuhone
Date:        2015-12-03 04:09:46+00:00
Summary:     Replacing cm
Affected #:  2 files

diff -r d426f52748f7061b679f7dfaea547044943da324 -r 544282e00cdec3d99ad1a3c6537437bab3a8f6c1 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -424,7 +424,7 @@
             # will not be necessary at all, as the final conversion will occur
             # at the display layer.
             if not dl.units.is_dimensionless:
-                dl.convert_to_units("cm")
+                dl.convert_to_units(self.ds.unit_registry["length"])
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
             d = chunk[field] * dl

diff -r d426f52748f7061b679f7dfaea547044943da324 -r 544282e00cdec3d99ad1a3c6537437bab3a8f6c1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -566,7 +566,7 @@
                                    north_vector=dd.north_vector, method=dd.method)
         units = Unit(dd.ds.field_info[item].units, registry=dd.ds.unit_registry)
         if dd.weight_field is None and dd.method == "integrate":
-            units *= Unit('cm', registry=dd.ds.unit_registry)
+            units *= Unit(self.ds.unit_system["length"], registry=dd.ds.unit_registry)
         ia = ImageArray(buff.swapaxes(0,1), input_units=units, info=self._get_info(item))
         self[item] = ia
         return ia


https://bitbucket.org/yt_analysis/yt/commits/f7317eb0a45b/
Changeset:   f7317eb0a45b
Branch:      yt
User:        jzuhone
Date:        2015-12-03 04:41:14+00:00
Summary:     Bug fixes
Affected #:  2 files

diff -r 544282e00cdec3d99ad1a3c6537437bab3a8f6c1 -r f7317eb0a45b5174323f289ddd7d6684f727b049 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -424,7 +424,7 @@
             # will not be necessary at all, as the final conversion will occur
             # at the display layer.
             if not dl.units.is_dimensionless:
-                dl.convert_to_units(self.ds.unit_registry["length"])
+                dl.convert_to_units(self.ds.unit_system["length"])
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
             d = chunk[field] * dl

diff -r 544282e00cdec3d99ad1a3c6537437bab3a8f6c1 -r f7317eb0a45b5174323f289ddd7d6684f727b049 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -101,7 +101,7 @@
     def _alfven_speed(field,data):
         return data[ftype,'magnetic_field_strength']/np.sqrt(mag_fac*data[ftype,'density'])
     registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
-                       units=mag_units)
+                       units=unit_system["velocity"])
 
     def _mach_alfven(field,data):
         return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']


https://bitbucket.org/yt_analysis/yt/commits/4de2d3ddd08c/
Changeset:   4de2d3ddd08c
Branch:      yt
User:        jzuhone
Date:        2015-12-03 05:43:02+00:00
Summary:     Fixing magnetic field units
Affected #:  1 file

diff -r f7317eb0a45b5174323f289ddd7d6684f727b049 -r 4de2d3ddd08cb6fa87788960a00e323c115d2966 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -15,6 +15,9 @@
 
 import numpy as np
 
+from yt.units import dimensions
+from yt.units.unit_object import Unit
+
 from yt.fields.derived_field import \
     ValidateParameter
 
@@ -29,17 +32,17 @@
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
     from yt.utilities.physical_constants import mu_0
     unit_system = registry.ds.unit_system
-    if str(unit_system) == "mks":
-        mag_units = unit_system["magnetic_field_mks"]
-        mag_fac = 1.0/mu_0
+    mag_units = registry.ds.field_info[ftype,"magnetic_field_x"].units
+    mag_dims = Unit(mag_units).dimensions
+    if mag_dims is dimensions.magnetic_field_mks:
+        mag_fac = mu_0
     else:
-        mag_fac = 1.0/(4.0*np.pi)
-        mag_units = unit_system["magnetic_field_cgs"]
+        mag_fac = 4.0*np.pi
 
     def _magnetic_energy(field,data):
-        return 0.5*mag_fac*(data[ftype,"magnetic_field_x"]**2 +
-                            data[ftype,"magnetic_field_y"]**2 +
-                            data[ftype,"magnetic_field_z"]**2)
+        return 0.5*(data[ftype,"magnetic_field_x"]**2 +
+                    data[ftype,"magnetic_field_y"]**2 +
+                    data[ftype,"magnetic_field_z"]**2)/mag_fac
     registry.add_field((ftype, "magnetic_energy"),
              function=_magnetic_energy,
              units=unit_system["pressure"])


https://bitbucket.org/yt_analysis/yt/commits/ce7542e7382f/
Changeset:   ce7542e7382f
Branch:      yt
User:        jzuhone
Date:        2015-12-03 05:43:13+00:00
Summary:     py2/3 fix here
Affected #:  1 file

diff -r 4de2d3ddd08cb6fa87788960a00e323c115d2966 -r ce7542e7382f0ec66641da5cdea2001625941917 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -14,7 +14,7 @@
 import contextlib
 import inspect
 
-from yt.extern.six import string_types
+from yt.extern.six import string_types, PY2
 from yt.funcs import \
     ensure_list
 from .field_exceptions import \
@@ -215,9 +215,13 @@
         return data_label
 
     def __repr__(self):
+        if PY2:
+            func_name = self._function.func_name
+        else:
+            func_name = self._function.__name__
         if self._function == NullFunc:
             s = "On-Disk Field "
-        elif self._function.func_name == "_TranslationFunc":
+        elif func_name == "_TranslationFunc":
             s = "Alias Field for \"%s\" " % (self._function.alias_name,)
         else:
             s = "Derived Field "


https://bitbucket.org/yt_analysis/yt/commits/11d4637907b3/
Changeset:   11d4637907b3
Branch:      yt
User:        jzuhone
Date:        2015-12-03 19:55:14+00:00
Summary:     Final fixes for the magnetic field units
Affected #:  2 files

diff -r ce7542e7382f0ec66641da5cdea2001625941917 -r 11d4637907b315d7c225d75c71f185207af3556d yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -17,6 +17,7 @@
 
 from yt.units import dimensions
 from yt.units.unit_object import Unit
+from yt.utilities.physical_constants import mu_0
 
 from yt.fields.derived_field import \
     ValidateParameter
@@ -28,21 +29,33 @@
     get_sph_theta_component, \
     get_sph_phi_component
 
+mag_factors = {dimensions.magnetic_field_cgs: 4.0*np.pi,
+               dimensions.magnetic_field_mks: mu_0}
+mag_units = {dimensions.magnetic_field_cgs: "gauss",
+             dimensions.magnetic_field_mks: "T"}
+
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
-    from yt.utilities.physical_constants import mu_0
     unit_system = registry.ds.unit_system
-    mag_units = registry.ds.field_info[ftype,"magnetic_field_x"].units
-    mag_dims = Unit(mag_units).dimensions
-    if mag_dims is dimensions.magnetic_field_mks:
-        mag_fac = mu_0
-    else:
-        mag_fac = 4.0*np.pi
 
-    def _magnetic_energy(field,data):
-        return 0.5*(data[ftype,"magnetic_field_x"]**2 +
-                    data[ftype,"magnetic_field_y"]**2 +
-                    data[ftype,"magnetic_field_z"]**2)/mag_fac
+    if (ftype,"magnetic_field_x") not in registry.ds.field_info:
+        return
+
+    u = Unit(registry.ds.field_info[ftype,"magnetic_field_x"].units)
+    mag_dims = u.dimensions
+
+    def _magnetic_field_strength(field,data):
+        B2 = (data[ftype,"magnetic_field_x"]**2 +
+              data[ftype,"magnetic_field_y"]**2 +
+              data[ftype,"magnetic_field_z"]**2)
+        return np.sqrt(B2)
+    registry.add_field((ftype,"magnetic_field_strength"),
+                       function=_magnetic_field_strength,
+                       units = unit_system[mag_dims])
+
+    def _magnetic_energy(field, data):
+        B = data[ftype,"magnetic_field_strength"]
+        return 0.5*B*B/mag_factors[B.units.dimensions]
     registry.add_field((ftype, "magnetic_energy"),
              function=_magnetic_energy,
              units=unit_system["pressure"])
@@ -59,12 +72,6 @@
              function=_magnetic_pressure,
              units=unit_system["pressure"])
 
-    def _magnetic_field_strength(field,data):
-        return np.sqrt(2.*mag_fac*data[ftype,"magnetic_energy"])
-    registry.add_field((ftype,"magnetic_field_strength"),
-                       function=_magnetic_field_strength,
-                       units = mag_units)
-
     def _magnetic_field_poloidal(field,data):
         normal = data.get_field_parameter("normal")
         d = data[ftype,'magnetic_field_x']
@@ -81,7 +88,7 @@
 
     registry.add_field((ftype, "magnetic_field_poloidal"),
              function=_magnetic_field_poloidal,
-             units=mag_units,
+             units=unit_system[mag_dims],
              validators=[ValidateParameter("normal")])
 
     def _magnetic_field_toroidal(field,data):
@@ -98,11 +105,12 @@
 
     registry.add_field((ftype, "magnetic_field_toroidal"),
              function=_magnetic_field_toroidal,
-             units=mag_units,
+             units=unit_system[mag_dims],
              validators=[ValidateParameter("normal")])
 
     def _alfven_speed(field,data):
-        return data[ftype,'magnetic_field_strength']/np.sqrt(mag_fac*data[ftype,'density'])
+        B = data[ftype,'magnetic_field_strength']
+        return B/np.sqrt(mag_factors[B.units.dimensions]*data[ftype,'density'])
     registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
                        units=unit_system["velocity"])
 

diff -r ce7542e7382f0ec66641da5cdea2001625941917 -r 11d4637907b315d7c225d75c71f185207af3556d yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -17,16 +17,15 @@
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
-                 temperature_unit, angle_unit, current_mks_unit=None,
+                 temperature_unit, angle_unit, current_mks_unit="A",
                  registry=None):
         self.registry = registry
         self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry),
                           dimensions.mass: Unit(mass_unit, registry=self.registry),
                           dimensions.time: Unit(time_unit, registry=self.registry),
                           dimensions.temperature: Unit(temperature_unit, registry=self.registry),
-                          dimensions.angle: Unit(angle_unit, registry=self.registry)}
-        if current_mks_unit is not None:
-            self.units_map[dimensions.current_mks] = Unit(current_mks_unit, registry=self.registry)
+                          dimensions.angle: Unit(angle_unit, registry=self.registry),
+                          dimensions.current_mks: Unit(current_mks_unit, registry=self.registry)}
         self.registry = registry
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self


https://bitbucket.org/yt_analysis/yt/commits/bf7a836b2365/
Changeset:   bf7a836b2365
Branch:      yt
User:        jzuhone
Date:        2015-12-03 19:57:55+00:00
Summary:     Magnetic field testing
Affected #:  1 file

diff -r 11d4637907b315d7c225d75c71f185207af3556d -r bf7a836b2365a9d5af4d55414691e161d8d50bb3 yt/fields/tests/test_magnetic_fields.py
--- /dev/null
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -0,0 +1,46 @@
+import numpy as np
+from yt.frontends.stream.api import load_uniform_grid
+from yt.utilities.physical_constants import mu_0
+from yt.testing import assert_array_almost_equal_nulp
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_magnetic_fields():
+
+    ddims = (32,32,32)
+
+    data_cgs = {"magnetic_field_x":(np.random.random(size=ddims), "gauss"),
+                "magnetic_field_y":(np.random.random(size=ddims), "gauss"),
+                "magnetic_field_z":(np.random.random(size=ddims), "gauss")}
+    data_mks = {"magnetic_field_x":(np.random.random(size=ddims), "T"),
+                "magnetic_field_y":(np.random.random(size=ddims), "T"),
+                "magnetic_field_z":(np.random.random(size=ddims), "T")}
+
+    ds_cgs = load_uniform_grid(data_cgs, ddims)
+    ds_mks = load_uniform_grid(data_mks, ddims, unit_system="mks")
+
+    ds_cgs.index
+    ds_mks.index
+
+    dd_cgs = ds_cgs.all_data()
+    dd_mks = ds_mks.all_data()
+
+    assert ds_cgs.field_info["gas","magnetic_field_strength"].units == "gauss"
+    assert ds_cgs.field_info["gas","magnetic_field_poloidal"].units == "gauss"
+    assert ds_cgs.field_info["gas","magnetic_field_toroidal"].units == "gauss"
+    assert ds_mks.field_info["gas","magnetic_field_strength"].units == "T"
+    assert ds_mks.field_info["gas","magnetic_field_poloidal"].units == "T"
+    assert ds_mks.field_info["gas","magnetic_field_toroidal"].units == "T"
+
+    emag_cgs = (dd_cgs["magnetic_field_x"]**2 +
+                dd_cgs["magnetic_field_y"]**2 +
+                dd_cgs["magnetic_field_z"]**2)/(8.0*np.pi)
+
+    emag_mks = (dd_mks["magnetic_field_x"]**2 +
+                dd_mks["magnetic_field_y"]**2 +
+                dd_mks["magnetic_field_z"]**2)/(2.0*mu_0)
+
+    yield assert_array_almost_equal_nulp, emag_cgs, dd_cgs["magnetic_energy"], 2
+    yield assert_array_almost_equal_nulp, emag_mks, dd_mks["magnetic_energy"], 2


https://bitbucket.org/yt_analysis/yt/commits/8fc522914bc1/
Changeset:   8fc522914bc1
Branch:      yt
User:        jzuhone
Date:        2015-12-03 20:06:41+00:00
Summary:     Testing this a little more
Affected #:  1 file

diff -r bf7a836b2365a9d5af4d55414691e161d8d50bb3 -r 8fc522914bc19610cd1a25c1a6ee035a962fe05d yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -37,10 +37,16 @@
     emag_cgs = (dd_cgs["magnetic_field_x"]**2 +
                 dd_cgs["magnetic_field_y"]**2 +
                 dd_cgs["magnetic_field_z"]**2)/(8.0*np.pi)
-
+    emag_cgs.convert_to_units("dyne/cm**2")
+    
     emag_mks = (dd_mks["magnetic_field_x"]**2 +
                 dd_mks["magnetic_field_y"]**2 +
                 dd_mks["magnetic_field_z"]**2)/(2.0*mu_0)
-
+    emag_mks.convert_to_units("Pa")
+    
     yield assert_array_almost_equal_nulp, emag_cgs, dd_cgs["magnetic_energy"], 2
     yield assert_array_almost_equal_nulp, emag_mks, dd_mks["magnetic_energy"], 2
+    
+    assert str(emag_cgs.units) == str(dd_cgs["magnetic_energy"].units)
+    assert str(emag_mks.units) == str(dd_mks["magnetic_energy"].units)
+


https://bitbucket.org/yt_analysis/yt/commits/e7ab8aa18c31/
Changeset:   e7ab8aa18c31
Branch:      yt
User:        jzuhone
Date:        2015-12-03 20:38:37+00:00
Summary:     Don't create this unless we need it
Affected #:  1 file

diff -r 8fc522914bc19610cd1a25c1a6ee035a962fe05d -r e7ab8aa18c31d570f03b5644b5d3e8e6a59160ee yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -236,8 +236,9 @@
         self._parse_parameter_file()
         self.set_units()
         self._setup_coordinate_handler()
-        create_code_unit_system(self)
+
         if unit_system == "code":
+            create_code_unit_system(self)
             unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 


https://bitbucket.org/yt_analysis/yt/commits/a474e9dc4a6f/
Changeset:   a474e9dc4a6f
Branch:      yt
User:        jzuhone
Date:        2015-12-03 21:18:15+00:00
Summary:     allow fake_random_ds to take a unit system
Affected #:  1 file

diff -r e7ab8aa18c31d570f03b5644b5d3e8e6a59160ee -r a474e9dc4a6f746bf7f49434006d8cff9657d33e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -165,7 +165,8 @@
         fields = ("density", "velocity_x", "velocity_y", "velocity_z"),
         units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
         particle_fields=None, particle_field_units=None,
-        negative = False, nprocs = 1, particles = 0, length_unit=1.0):
+        negative = False, nprocs = 1, particles = 0, length_unit=1.0,
+        unit_system="cgs"):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
@@ -201,7 +202,8 @@
                 data['io', f] = (np.random.random(size=particles) - 0.5, 'cm/s')
             data['io', 'particle_mass'] = (np.random.random(particles), 'g')
         data['number_of_particles'] = particles
-    ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
+    ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs,
+                           unit_system=unit_system)
     return ug
 
 _geom_transforms = {


https://bitbucket.org/yt_analysis/yt/commits/f8a57e35dac5/
Changeset:   f8a57e35dac5
Branch:      yt
User:        jzuhone
Date:        2015-12-03 21:39:46+00:00
Summary:     writing this test more sensibly
Affected #:  1 file

diff -r a474e9dc4a6f746bf7f49434006d8cff9657d33e -r f8a57e35dac5d5e295c6693ce1f18193f1d5305b yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -1,7 +1,6 @@
 import numpy as np
-from yt.frontends.stream.api import load_uniform_grid
 from yt.utilities.physical_constants import mu_0
-from yt.testing import assert_array_almost_equal_nulp
+from yt.testing import assert_almost_equal, fake_random_ds
 
 def setup():
     from yt.config import ytcfg
@@ -9,17 +8,13 @@
 
 def test_magnetic_fields():
 
-    ddims = (32,32,32)
+    fields = ("magnetic_field_x", "magnetic_field_y", "magnetic_field_z")
+    cgs_units = ("gauss",)*3
+    mks_units = ("T",)*3
 
-    data_cgs = {"magnetic_field_x":(np.random.random(size=ddims), "gauss"),
-                "magnetic_field_y":(np.random.random(size=ddims), "gauss"),
-                "magnetic_field_z":(np.random.random(size=ddims), "gauss")}
-    data_mks = {"magnetic_field_x":(np.random.random(size=ddims), "T"),
-                "magnetic_field_y":(np.random.random(size=ddims), "T"),
-                "magnetic_field_z":(np.random.random(size=ddims), "T")}
-
-    ds_cgs = load_uniform_grid(data_cgs, ddims)
-    ds_mks = load_uniform_grid(data_mks, ddims, unit_system="mks")
+    ds_cgs = fake_random_ds(16, fields=fields, units=cgs_units, nprocs=16)
+    ds_mks = fake_random_ds(16, fields=fields, units=mks_units, nprocs=16,
+                            unit_system="mks")
 
     ds_cgs.index
     ds_mks.index
@@ -38,15 +33,15 @@
                 dd_cgs["magnetic_field_y"]**2 +
                 dd_cgs["magnetic_field_z"]**2)/(8.0*np.pi)
     emag_cgs.convert_to_units("dyne/cm**2")
-    
+
     emag_mks = (dd_mks["magnetic_field_x"]**2 +
                 dd_mks["magnetic_field_y"]**2 +
                 dd_mks["magnetic_field_z"]**2)/(2.0*mu_0)
     emag_mks.convert_to_units("Pa")
-    
-    yield assert_array_almost_equal_nulp, emag_cgs, dd_cgs["magnetic_energy"], 2
-    yield assert_array_almost_equal_nulp, emag_mks, dd_mks["magnetic_energy"], 2
-    
+
+    yield assert_almost_equal, emag_cgs, dd_cgs["magnetic_energy"]
+    yield assert_almost_equal, emag_mks, dd_mks["magnetic_energy"]
+
     assert str(emag_cgs.units) == str(dd_cgs["magnetic_energy"].units)
     assert str(emag_mks.units) == str(dd_mks["magnetic_energy"].units)
 


https://bitbucket.org/yt_analysis/yt/commits/4c725d533d68/
Changeset:   4c725d533d68
Branch:      yt
User:        jzuhone
Date:        2015-12-03 22:23:34+00:00
Summary:     Nice __repr__ for UnitSystem
Affected #:  1 file

diff -r f8a57e35dac5d5e295c6693ce1f18193f1d5305b -r 4c725d533d68bfe4e4d153ce8f6510dd39c56079 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -17,15 +17,18 @@
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
-                 temperature_unit, angle_unit, current_mks_unit="A",
+                 temperature_unit, angle_unit, current_mks_unit=None,
                  registry=None):
         self.registry = registry
         self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry),
                           dimensions.mass: Unit(mass_unit, registry=self.registry),
                           dimensions.time: Unit(time_unit, registry=self.registry),
                           dimensions.temperature: Unit(temperature_unit, registry=self.registry),
-                          dimensions.angle: Unit(angle_unit, registry=self.registry),
-                          dimensions.current_mks: Unit(current_mks_unit, registry=self.registry)}
+                          dimensions.angle: Unit(angle_unit, registry=self.registry)}
+        self._keys = ["length","mass","time","temperature","angle"]
+        if current_mks_unit is not None:
+            self.units_map[dimensions.current_mks] = Unit(current_mks_unit, registry=self.registry)
+            self._keys.append("current_mks")
         self.registry = registry
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
@@ -33,6 +36,8 @@
 
     def __getitem__(self, key):
         if isinstance(key, string_types):
+            if key not in self._keys:
+                self._keys.append(key)
             key = getattr(dimensions, key)
         if key not in self.units_map:
             dims = key.expand()
@@ -48,39 +53,57 @@
 
     def __setitem__(self, key, value):
         if isinstance(key, string_types):
+            if key not in self._keys:
+                self._keys.append(key)
             key = getattr(dimensions, key)
         self.units_map[key] = Unit(value, registry=self.registry)
 
+    def keys(self):
+        return self._keys
+
     def __str__(self):
         return self.name
 
+    def __repr__(self):
+        repr = "%s Unit System\n" % self.name
+        repr += " Base Units:\n"
+        for dim in self.base_units:
+            repr += "  %s: %s\n" % (str(dim).strip("()"), self.base_units[dim])
+        repr += " Other Units:\n"
+        for key in self.keys():
+            dim = getattr(dimensions, key)
+            if dim not in self.base_units:
+                repr += "  %s: %s\n" % (key, self.units_map[dim])
+        return repr
+
 def create_code_unit_system(ds):
     code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
                                   "code_temperature", "radian", registry=ds.unit_registry)
-    code_unit_system[dimensions.velocity] = "code_velocity"
+    code_unit_system["velocity"] = "code_velocity"
 
 cgs_unit_system = UnitSystem("cgs", "cm", "g", "s", "K", "radian")
-cgs_unit_system[dimensions.energy] = "erg"
-cgs_unit_system[dimensions.specific_energy] = "erg/g"
-cgs_unit_system[dimensions.pressure] = "dyne/cm**2"
-cgs_unit_system[dimensions.force] = "dyne"
-cgs_unit_system[dimensions.magnetic_field_cgs] = "gauss"
+cgs_unit_system["energy"] = "erg"
+cgs_unit_system["specific_energy"] = "erg/g"
+cgs_unit_system["pressure"] = "dyne/cm**2"
+cgs_unit_system["force"] = "dyne"
+cgs_unit_system["magnetic_field_cgs"] = "gauss"
 
 mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian",
                              current_mks_unit="A")
-mks_unit_system[dimensions.energy] = "J"
-mks_unit_system[dimensions.specific_energy] = "J/kg"
-mks_unit_system[dimensions.pressure] = "Pa"
-mks_unit_system[dimensions.force] = "N"
-mks_unit_system[dimensions.magnetic_field_mks] = "T"
+mks_unit_system["energy"] = "J"
+mks_unit_system["specific_energy"] = "J/kg"
+mks_unit_system["pressure"] = "Pa"
+mks_unit_system["force"] = "N"
+mks_unit_system["magnetic_field_mks"] = "T"
 
 imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", "R", "radian")
-imperial_unit_system[dimensions.force] = "lbf"
-imperial_unit_system[dimensions.energy] = "ft*lbf"
+imperial_unit_system["force"] = "lbf"
+imperial_unit_system["energy"] = "ft*lbf"
+imperial_unit_system["pressure"] = "lbf/ft**2"
 
 galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
-galactic_unit_system[dimensions.energy] = "keV"
-galactic_unit_system[dimensions.magnetic_field_cgs] = "uG"
+galactic_unit_system["energy"] = "keV"
+galactic_unit_system["magnetic_field_cgs"] = "uG"
 
 geographic_unit_system = UnitSystem("geographic", "mile", "lbm", "s", "K", "deg")
-geographic_unit_system[dimensions.pressure] = "atm"
+geographic_unit_system["pressure"] = "atm"


https://bitbucket.org/yt_analysis/yt/commits/900adbd183bf/
Changeset:   900adbd183bf
Branch:      yt
User:        jzuhone
Date:        2015-12-03 22:25:59+00:00
Summary:     Make this fully private
Affected #:  1 file

diff -r 4c725d533d68bfe4e4d153ce8f6510dd39c56079 -r 900adbd183bfa2f9482a02bbeee10c1c591ea5a6 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -25,10 +25,10 @@
                           dimensions.time: Unit(time_unit, registry=self.registry),
                           dimensions.temperature: Unit(temperature_unit, registry=self.registry),
                           dimensions.angle: Unit(angle_unit, registry=self.registry)}
-        self._keys = ["length","mass","time","temperature","angle"]
+        self._dims = ["length","mass","time","temperature","angle"]
         if current_mks_unit is not None:
             self.units_map[dimensions.current_mks] = Unit(current_mks_unit, registry=self.registry)
-            self._keys.append("current_mks")
+            self._dims.append("current_mks")
         self.registry = registry
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
@@ -36,8 +36,8 @@
 
     def __getitem__(self, key):
         if isinstance(key, string_types):
-            if key not in self._keys:
-                self._keys.append(key)
+            if key not in self._dims:
+                self._dims.append(key)
             key = getattr(dimensions, key)
         if key not in self.units_map:
             dims = key.expand()
@@ -53,14 +53,11 @@
 
     def __setitem__(self, key, value):
         if isinstance(key, string_types):
-            if key not in self._keys:
-                self._keys.append(key)
+            if key not in self._dims:
+                self._dims.append(key)
             key = getattr(dimensions, key)
         self.units_map[key] = Unit(value, registry=self.registry)
 
-    def keys(self):
-        return self._keys
-
     def __str__(self):
         return self.name
 
@@ -70,7 +67,7 @@
         for dim in self.base_units:
             repr += "  %s: %s\n" % (str(dim).strip("()"), self.base_units[dim])
         repr += " Other Units:\n"
-        for key in self.keys():
+        for key in self._dims:
             dim = getattr(dimensions, key)
             if dim not in self.base_units:
                 repr += "  %s: %s\n" % (key, self.units_map[dim])


https://bitbucket.org/yt_analysis/yt/commits/b30538a9bfa4/
Changeset:   b30538a9bfa4
Branch:      yt
User:        jzuhone
Date:        2015-12-04 04:53:36+00:00
Summary:     unit systems test
Affected #:  2 files

diff -r 900adbd183bfa2f9482a02bbeee10c1c591ea5a6 -r b30538a9bfa4d97894248ce3c1a9d522bf8057f1 yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -12,8 +12,8 @@
     cgs_units = ("gauss",)*3
     mks_units = ("T",)*3
 
-    ds_cgs = fake_random_ds(16, fields=fields, units=cgs_units, nprocs=16)
-    ds_mks = fake_random_ds(16, fields=fields, units=mks_units, nprocs=16,
+    ds_cgs = fake_random_ds(16, fields=fields, units=cgs_units, nprocs=4)
+    ds_mks = fake_random_ds(16, fields=fields, units=mks_units, nprocs=4,
                             unit_system="mks")
 
     ds_cgs.index

diff -r 900adbd183bfa2f9482a02bbeee10c1c591ea5a6 -r b30538a9bfa4d97894248ce3c1a9d522bf8057f1 yt/units/tests/test_unit_systems.py
--- /dev/null
+++ b/yt/units/tests/test_unit_systems.py
@@ -0,0 +1,61 @@
+"""
+Test unit systems. 
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.units.unit_object import unit_system_registry
+from yt.convenience import load
+from yt.testing import fake_random_ds, assert_almost_equal, requires_file
+from yt.config import ytcfg
+import numpy as np
+
+def test_unit_systems():
+    pass
+
+gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+@requires_file(gslr)
+def test_fields_diff_systems():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+    test_fields = ["density",
+                   "kinetic_energy",
+                   "velocity_divergence",
+                   "density_gradient_x",
+                   "velocity_magnitude"]
+
+    test_units = {}
+    test_units["mks"] = {"density":"kg/m**3",
+                         "kinetic_energy":"Pa",
+                         "velocity_magnitude":"m/s",
+                         "velocity_divergence":"1/s",
+                         "density_gradient_x":"kg/m**4"}
+    test_units["imperial"] = {"density":"lbm/ft**3",
+                              "kinetic_energy":"lbf/ft**2",
+                              "velocity_magnitude":"ft/s",
+                              "velocity_divergence":"1/s",
+                              "density_gradient_x":"lbm/ft**4"}
+    test_units["galactic"] = {"density":"Msun/kpc**3",
+                              "kinetic_energy":"Msun/(Myr**2*kpc)",
+                              "velocity_magnitude":"kpc/Myr",
+                              "velocity_divergence":"1/Myr",
+                              "density_gradient_x":"Msun/kpc**4"}
+
+    ds_cgs = load(gslr)
+    dd_cgs = ds_cgs.sphere("c", (100., "kpc"))
+
+    for us in test_units:
+        ds = load(gslr, unit_system=us)
+        dd = ds.sphere("c", (100.,"kpc"))
+        for field in test_fields:
+            v1 = dd_cgs[field].in_base(us)
+            v2 = dd[field]
+            print(field)
+            assert_almost_equal(v1.v, v2.v)
+            assert str(v2.units) == test_units[us][field]


https://bitbucket.org/yt_analysis/yt/commits/96f3696ec341/
Changeset:   96f3696ec341
Branch:      yt
User:        jzuhone
Date:        2015-12-04 06:11:50+00:00
Summary:     more unit system testing
Affected #:  1 file

diff -r b30538a9bfa4d97894248ce3c1a9d522bf8057f1 -r 96f3696ec34113d735c629d83b7e2c17268e87d9 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -11,14 +11,24 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.units.unit_object import unit_system_registry
+from yt.units.unit_object import Unit, unit_system_registry
+from yt.units.unit_systems import UnitSystem
+from yt.units import dimensions
 from yt.convenience import load
-from yt.testing import fake_random_ds, assert_almost_equal, requires_file
+from yt.testing import assert_almost_equal, requires_file
 from yt.config import ytcfg
 import numpy as np
 
 def test_unit_systems():
-    pass
+    goofy_unit_system = UnitSystem("goofy", "ly", "lbm", "hr", "R",
+                                   "arcsec", current_mks_unit="mA")
+    assert goofy_unit_system["temperature"] == Unit("R")
+    assert goofy_unit_system[dimensions.solid_angle] == Unit("arcsec**2")
+    assert goofy_unit_system["energy"] == Unit("lbm*ly**2/hr**2")
+    goofy_unit_system["energy"] = "eV"
+    assert goofy_unit_system["energy"] == Unit("eV")
+    assert goofy_unit_system["magnetic_field_mks"] == Unit("lbm/(hr**2*mA)")
+    assert "goofy" in unit_system_registry
 
 gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
 @requires_file(gslr)
@@ -56,6 +66,5 @@
         for field in test_fields:
             v1 = dd_cgs[field].in_base(us)
             v2 = dd[field]
-            print(field)
             assert_almost_equal(v1.v, v2.v)
-            assert str(v2.units) == test_units[us][field]
+            assert str(v2.units) == test_units[us][field]
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/5240fa6aef26/
Changeset:   5240fa6aef26
Branch:      yt
User:        jzuhone
Date:        2015-12-06 00:44:34+00:00
Summary:     Test code units too
Affected #:  2 files

diff -r 96f3696ec34113d735c629d83b7e2c17268e87d9 -r 5240fa6aef26331fcc7650691fedc70f61da5f11 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -41,7 +41,8 @@
     if (ftype,"magnetic_field_x") not in registry.ds.field_info:
         return
 
-    u = Unit(registry.ds.field_info[ftype,"magnetic_field_x"].units)
+    u = Unit(registry.ds.field_info[ftype,"magnetic_field_x"].units,
+             registry=registry.ds.unit_registry)
     mag_dims = u.dimensions
 
     def _magnetic_field_strength(field,data):

diff -r 96f3696ec34113d735c629d83b7e2c17268e87d9 -r 5240fa6aef26331fcc7650691fedc70f61da5f11 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -56,6 +56,11 @@
                               "velocity_magnitude":"kpc/Myr",
                               "velocity_divergence":"1/Myr",
                               "density_gradient_x":"Msun/kpc**4"}
+    test_units["code"] = {"density":"code_mass/code_length**3",
+                          "kinetic_energy":"code_mass/(code_length*code_time**2)",
+                          "velocity_magnitude":"code_velocity",
+                          "velocity_divergence":"code_velocity/code_length",
+                          "density_gradient_x":"code_mass/code_length**4"}
 
     ds_cgs = load(gslr)
     dd_cgs = ds_cgs.sphere("c", (100., "kpc"))
@@ -64,7 +69,10 @@
         ds = load(gslr, unit_system=us)
         dd = ds.sphere("c", (100.,"kpc"))
         for field in test_fields:
-            v1 = dd_cgs[field].in_base(us)
+            if us == "code":
+                v1 = dd_cgs[field]
+            else:
+                v1 = dd_cgs[field].in_base(us)
             v2 = dd[field]
             assert_almost_equal(v1.v, v2.v)
             assert str(v2.units) == test_units[us][field]
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/c0c482412129/
Changeset:   c0c482412129
Branch:      yt
User:        jzuhone
Date:        2015-12-06 00:51:43+00:00
Summary:     Someone will add this later
Affected #:  1 file

diff -r 5240fa6aef26331fcc7650691fedc70f61da5f11 -r c0c4824121293fda008c85566f83093e8087dcd6 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -100,7 +100,4 @@
 
 galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
 galactic_unit_system["energy"] = "keV"
-galactic_unit_system["magnetic_field_cgs"] = "uG"
-
-geographic_unit_system = UnitSystem("geographic", "mile", "lbm", "s", "K", "deg")
-geographic_unit_system["pressure"] = "atm"
+galactic_unit_system["magnetic_field_cgs"] = "uG"
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/58ad89b3d1a2/
Changeset:   58ad89b3d1a2
Branch:      yt
User:        jzuhone
Date:        2015-12-06 00:53:32+00:00
Summary:     Possibly useful
Affected #:  1 file

diff -r c0c4824121293fda008c85566f83093e8087dcd6 -r 58ad89b3d1a26aaf17a5dc52e1032f23cff07c78 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -100,4 +100,6 @@
 
 galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
 galactic_unit_system["energy"] = "keV"
-galactic_unit_system["magnetic_field_cgs"] = "uG"
\ No newline at end of file
+galactic_unit_system["magnetic_field_cgs"] = "uG"
+
+solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr", "K", "radian")
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/116cc3840888/
Changeset:   116cc3840888
Branch:      yt
User:        jzuhone
Date:        2015-12-06 04:09:13+00:00
Summary:     Only allow strings to go in here
Affected #:  1 file

diff -r 58ad89b3d1a26aaf17a5dc52e1032f23cff07c78 -r 116cc384088897a62fe62ffbc3cdc31251ea84bd yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -459,7 +459,7 @@
 
         Parameters
         ----------
-        unit_system : UnitRegistry object or string, optional
+        unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
             the default base units are used.
 
@@ -468,8 +468,7 @@
         >>> E = YTQuantity(2.5, "erg/s")
         >>> E.convert_to_base(unit_system="galactic")
         """
-        if isinstance(unit_system, string_types):
-            unit_system = unit_system_registry[unit_system]
+        unit_system = unit_system_registry[unit_system]
         return self.convert_to_units(self.units.get_base_equivalent(unit_system=unit_system))
 
     def convert_to_cgs(self):
@@ -527,7 +526,7 @@
 
         Parameters
         ----------
-        unit_system : UnitRegistry object or string, optional
+        unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
             the default base units are used.
 
@@ -536,8 +535,7 @@
         >>> E = YTQuantity(2.5, "erg/s")
         >>> E_new = E.in_base(unit_system="galactic")
         """
-        if isinstance(unit_system, string_types):
-            unit_system = unit_system_registry[unit_system]
+        unit_system = unit_system_registry[unit_system]
         return self.in_units(self.units.get_base_equivalent(unit_system=unit_system))
 
     def in_cgs(self):


https://bitbucket.org/yt_analysis/yt/commits/969fddef35ea/
Changeset:   969fddef35ea
Branch:      yt
User:        jzuhone
Date:        2015-12-06 06:21:31+00:00
Summary:     Wrong label
Affected #:  1 file

diff -r 116cc384088897a62fe62ffbc3cdc31251ea84bd -r 969fddef35ea318eb85079def253d92ab22e4ee6 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -137,7 +137,7 @@
     # Planck units
     "m_pl": (planck_mass_grams, dimensions.mass, 0.0, r"m_{\rm{P}}"),
     "l_pl": (planck_length_cm, dimensions.length, 0.0, r"\ell_\rm{P}"),
-    "t_pl": (planck_time_s, dimensions.time, 0.0, r"r_{\rm{P}}"),
+    "t_pl": (planck_time_s, dimensions.time, 0.0, r"t_{\rm{P}}"),
     "T_pl": (planck_temperature_K, dimensions.temperature, 0.0, r"T_{\rm{P}}"),
     "q_pl": (planck_charge_esu, dimensions.charge_cgs, 0.0, r"q_{\rm{P}}"),
     "E_pl": (planck_energy_erg, dimensions.energy, 0.0, r"E_{\rm{P}}"),


https://bitbucket.org/yt_analysis/yt/commits/34341cab296e/
Changeset:   34341cab296e
Branch:      yt
User:        jzuhone
Date:        2015-12-06 06:31:55+00:00
Summary:     fixing example
Affected #:  1 file

diff -r 969fddef35ea318eb85079def253d92ab22e4ee6 -r 34341cab296e9a959cfaf84338237f3d0a7fdad8 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1516,9 +1516,9 @@
     Examples
     --------
     >>> sp = ds.sphere("c", (100,"kpc"))
-    >>> a = sphere["density"]
-    >>> b = sphere["temperature"]
-    >>> c = sphere["velocity_x"]
+    >>> a = sp["density"]
+    >>> b = sp["temperature"]
+    >>> c = sp["velocity_x"]
     >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
     """
     if not isinstance(arrays, list):


https://bitbucket.org/yt_analysis/yt/commits/1ebf9ac926fc/
Changeset:   1ebf9ac926fc
Branch:      yt
User:        jzuhone
Date:        2015-12-06 06:35:10+00:00
Summary:     Don't need this
Affected #:  1 file

diff -r 34341cab296e9a959cfaf84338237f3d0a7fdad8 -r 1ebf9ac926fc9c9ea8a601e9c1a0c94cb4d0fe76 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -16,7 +16,6 @@
 import copy
 import numpy as np
 
-from yt.extern.six import string_types
 from functools import wraps
 from numpy import \
     add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \


https://bitbucket.org/yt_analysis/yt/commits/aa4f97fdf579/
Changeset:   aa4f97fdf579
Branch:      yt
User:        jzuhone
Date:        2015-12-08 19:17:30+00:00
Summary:     Make all of the unit conversion methods take strings for the unit_system argument. Make the default "cgs", not None.
Affected #:  4 files

diff -r 1ebf9ac926fc9c9ea8a601e9c1a0c94cb4d0fe76 -r aa4f97fdf5790d466683ab6c011a3f1fa3ea2050 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_base_equivalent(unit_system=self.ds.unit_system))
+                output_units = str(u.get_base_equivalent(self.ds.unit_system.name))
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_base_equivalent(unit_system=self.ds.unit_system))
+            units = str(u.get_base_equivalent(self.ds.unit_system.name))
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),

diff -r 1ebf9ac926fc9c9ea8a601e9c1a0c94cb4d0fe76 -r aa4f97fdf5790d466683ab6c011a3f1fa3ea2050 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -70,6 +70,7 @@
         dd = ds.sphere("c", (100.,"kpc"))
         for field in test_fields:
             if us == "code":
+                # For this dataset code units are cgs
                 v1 = dd_cgs[field]
             else:
                 v1 = dd_cgs[field].in_base(us)

diff -r 1ebf9ac926fc9c9ea8a601e9c1a0c94cb4d0fe76 -r aa4f97fdf5790d466683ab6c011a3f1fa3ea2050 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -410,16 +410,17 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def get_base_equivalent(self, unit_system=None):
+    def get_base_equivalent(self, unit_system="cgs"):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
         yt_base_unit_string = self._get_system_unit_string(default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
-        if unit_system is None:
+        if unit_system == "cgs":
             return yt_base_unit
         else:
+            unit_system = unit_system_registry[unit_system]
             units_string = self._get_system_unit_string(unit_system.base_units)
             u = Unit(units_string, registry=self.registry)
             base_value = get_conversion_factor(self, yt_base_unit)[0]
@@ -433,17 +434,13 @@
         """
         if current_mks in self.dimensions.free_symbols:
             raise YTUnitsNotReducible(self, "cgs")
-        # Note that the following call only works because the default base units
-        # are simply cgs units plus Amperes. If that is no longer the case, we will
-        # then need to pass "unit_system=unit_system_registry['cgs']" to the
-        # get_base_equivalent call here.
-        return self.get_base_equivalent()
+        return self.get_base_equivalent(unit_system="cgs")
 
     def get_mks_equivalent(self):
         """
         Create and return dimensionally-equivalent mks units.
         """
-        return self.get_base_equivalent(unit_system=unit_system_registry["mks"])
+        return self.get_base_equivalent(unit_system="mks")
 
     def get_conversion_factor(self, other_units):
         return get_conversion_factor(self, other_units)

diff -r 1ebf9ac926fc9c9ea8a601e9c1a0c94cb4d0fe76 -r aa4f97fdf5790d466683ab6c011a3f1fa3ea2050 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -451,7 +451,7 @@
 
         return self
 
-    def convert_to_base(self, unit_system=None):
+    def convert_to_base(self, unit_system="cgs"):
         """
         Convert the array and units to the equivalent base units in
         the specified unit system.
@@ -460,15 +460,14 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units are used.
+            the default base units of cgs are used.
 
         Examples
         --------
         >>> E = YTQuantity(2.5, "erg/s")
         >>> E.convert_to_base(unit_system="galactic")
         """
-        unit_system = unit_system_registry[unit_system]
-        return self.convert_to_units(self.units.get_base_equivalent(unit_system=unit_system))
+        return self.convert_to_units(self.units.get_base_equivalent(unit_system))
 
     def convert_to_cgs(self):
         """
@@ -518,7 +517,7 @@
         """
         return self.in_units(units)
 
-    def in_base(self, unit_system=None):
+    def in_base(self, unit_system="cgs"):
         """
         Creates a copy of this array with the data in the specified unit system,
         and returns it in that system's base units.
@@ -527,15 +526,14 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units are used.
+            the default base units of cgs are used.
 
         Examples
         --------
         >>> E = YTQuantity(2.5, "erg/s")
         >>> E_new = E.in_base(unit_system="galactic")
         """
-        unit_system = unit_system_registry[unit_system]
-        return self.in_units(self.units.get_base_equivalent(unit_system=unit_system))
+        return self.in_units(self.units.get_base_equivalent(unit_system))
 
     def in_cgs(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/7354c2f05a51/
Changeset:   7354c2f05a51
Branch:      yt
User:        jzuhone
Date:        2015-12-08 21:31:26+00:00
Summary:     Make sure the self-created units pick up the registry
Affected #:  1 file

diff -r aa4f97fdf5790d466683ab6c011a3f1fa3ea2050 -r 7354c2f05a516d72c9c35f6b935cfc0041c3b01f yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -41,7 +41,7 @@
             key = getattr(dimensions, key)
         if key not in self.units_map:
             dims = key.expand()
-            units = Unit("")
+            units = Unit("", registry=self.registry)
             for factor in dims.as_ordered_factors():
                 dim = list(factor.free_symbols)[0]
                 u = self.units_map[dim]


https://bitbucket.org/yt_analysis/yt/commits/03872e03e3a2/
Changeset:   03872e03e3a2
Branch:      yt
User:        jzuhone
Date:        2015-12-08 21:32:45+00:00
Summary:     Re-doing how we do magnetic field aliases so we can go between MKS-like and cgs-like on the fly
Affected #:  4 files

diff -r 7354c2f05a516d72c9c35f6b935cfc0041c3b01f -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -31,19 +31,15 @@
 
 mag_factors = {dimensions.magnetic_field_cgs: 4.0*np.pi,
                dimensions.magnetic_field_mks: mu_0}
-mag_units = {dimensions.magnetic_field_cgs: "gauss",
-             dimensions.magnetic_field_mks: "T"}
 
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
     unit_system = registry.ds.unit_system
 
-    if (ftype,"magnetic_field_x") not in registry.ds.field_info:
+    if (ftype,"magnetic_field_x") not in registry:
         return
 
-    u = Unit(registry.ds.field_info[ftype,"magnetic_field_x"].units,
-             registry=registry.ds.unit_registry)
-    mag_dims = u.dimensions
+    u = registry[ftype,"magnetic_field_x"].units
 
     def _magnetic_field_strength(field,data):
         B2 = (data[ftype,"magnetic_field_x"]**2 +
@@ -52,7 +48,7 @@
         return np.sqrt(B2)
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
-                       units = unit_system[mag_dims])
+                       units=u)
 
     def _magnetic_energy(field, data):
         B = data[ftype,"magnetic_field_strength"]
@@ -81,16 +77,15 @@
                      data[ftype,'magnetic_field_y'],
                      data[ftype,'magnetic_field_z']],
                      d.units)
-        
+
         theta = data["index", 'spherical_theta']
         phi   = data["index", 'spherical_phi']
-        
+
         return get_sph_theta_component(Bfields, theta, phi, normal)
 
     registry.add_field((ftype, "magnetic_field_poloidal"),
              function=_magnetic_field_poloidal,
-             units=unit_system[mag_dims],
-             validators=[ValidateParameter("normal")])
+             units=u, validators=[ValidateParameter("normal")])
 
     def _magnetic_field_toroidal(field,data):
         normal = data.get_field_parameter("normal")
@@ -106,8 +101,7 @@
 
     registry.add_field((ftype, "magnetic_field_toroidal"),
              function=_magnetic_field_toroidal,
-             units=unit_system[mag_dims],
-             validators=[ValidateParameter("normal")])
+             units=u, validators=[ValidateParameter("normal")])
 
     def _alfven_speed(field,data):
         B = data[ftype,'magnetic_field_strength']
@@ -120,3 +114,25 @@
     registry.add_field((ftype, "mach_alfven"), function=_mach_alfven,
                        units="dimensionless")
 
+def setup_magnetic_field_aliases(registry, ds_fields, ftype="gas"):
+    unit_system = registry.ds.unit_system
+    from_units = Unit(registry[ds_fields[0]].units,
+                      registry=registry.ds.unit_registry)
+    if dimensions.current_mks in unit_system.base_units:
+        to_units = unit_system["magnetic_field_mks"]
+        equiv = "SI"
+    else:
+        to_units = unit_system["magnetic_field_cgs"]
+        equiv = "CGS"
+    if from_units.dimensions == to_units.dimensions:
+        convert = lambda x: x.in_units(to_units)
+    else:
+        convert = lambda x: x.to_equivalent(to_units, equiv)
+    def mag_field(ax, fd):
+        def _mag_field(field, data):
+            return convert(data[fd])
+        return _mag_field
+    for ax, fd in zip("xyz", ds_fields):
+        registry.add_field((ftype,"magnetic_field_%s" % ax),
+                           function=mag_field(ax, fd),
+                           units=unit_system[to_units.dimensions])
\ No newline at end of file

diff -r 7354c2f05a516d72c9c35f6b935cfc0041c3b01f -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -31,9 +31,9 @@
 class AthenaFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", ("code_mass/code_length**3", ["density"], None)),
-        ("cell_centered_B_x", (b_units, ["magnetic_field_x"], None)),
-        ("cell_centered_B_y", (b_units, ["magnetic_field_y"], None)),
-        ("cell_centered_B_z", (b_units, ["magnetic_field_z"], None)),
+        ("cell_centered_B_x", (b_units, [], None)),
+        ("cell_centered_B_y", (b_units, [], None)),
+        ("cell_centered_B_z", (b_units, [], None)),
     )
 
 # In Athena, conservative or primitive variables may be written out.
@@ -42,6 +42,8 @@
 # case that the file contains the conservative ones.
 
     def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
         unit_system = self.ds.unit_system
         # Add velocity fields
         for comp in "xyz":
@@ -123,3 +125,7 @@
             return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz
         self.add_field(("gas","temperature"), function=_temperature,
                        units=unit_system["temperature"])
+
+        setup_magnetic_field_aliases(self, [("athena","cell_centered_B_%s" % ax) for ax in "xyz"])
+
+

diff -r 7354c2f05a516d72c9c35f6b935cfc0041c3b01f -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -57,9 +57,9 @@
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),
         ("H2I_kdiss", ("1/code_time", [], None)),
-        ("Bx", (b_units, ["magnetic_field_x"], None)),
-        ("By", (b_units, ["magnetic_field_y"], None)),
-        ("Bz", (b_units, ["magnetic_field_z"], None)),
+        ("Bx", (b_units, [], None)),
+        ("By", (b_units, [], None)),
+        ("Bz", (b_units, [], None)),
         ("RadAccel1", (ra_units, ["radiation_acceleration_x"], None)),
         ("RadAccel2", (ra_units, ["radiation_acceleration_y"], None)),
         ("RadAccel3", (ra_units, ["radiation_acceleration_z"], None)),
@@ -147,6 +147,8 @@
         self.species_names.sort()  # bb #1059
 
     def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
         # Now we conditionally load a few other things.
         params = self.ds.parameters
         multi_species = params.get("MultiSpecies", None)
@@ -156,6 +158,7 @@
         if multi_species > 0 or dengo == 1:
             self.setup_species_fields()
         self.setup_energy_field()
+        setup_magnetic_field_aliases(self, [("enzo","B%s" % ax) for ax in "xyz"])
 
     def setup_energy_field(self):
         unit_system = self.ds.unit_system

diff -r 7354c2f05a516d72c9c35f6b935cfc0041c3b01f -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -79,9 +79,9 @@
         ("targ", ("", [], "Target Material Fraction")),
         ("sumy", ("", [], None)),
         ("mgdc", ("", [], "Emission Minus Absorption Diffusion Terms")),
-        ("magx", (b_units, ["magnetic_field_x"], "B_x")),
-        ("magy", (b_units, ["magnetic_field_y"], "B_y")),
-        ("magz", (b_units, ["magnetic_field_z"], "B_z")),
+        ("magx", (b_units, [], "B_x")),
+        ("magy", (b_units, [], "B_y")),
+        ("magz", (b_units, [], "B_z")),
     )
 
     known_particle_fields = (
@@ -96,6 +96,8 @@
     )
 
     def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
         unit_system = self.ds.unit_system
         for i in range(1, 1000):
             self.add_output_field(("flash", "r{0:03}".format(i)), 
@@ -160,4 +162,6 @@
         self.add_field(("gas","number_density"), function=_number_density,
                        units=unit_system["number_density"])
 
+        setup_magnetic_field_aliases(self, [("flash","mag%s" % ax) for ax in "xyz"])
 
+


https://bitbucket.org/yt_analysis/yt/commits/121bfa21899c/
Changeset:   121bfa21899c
Branch:      yt
User:        jzuhone
Date:        2015-12-08 23:03:50+00:00
Summary:     Simpler way of handling magnetic field aliases than my first attempt. adding some more frontends.
Affected #:  6 files

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -116,6 +116,7 @@
 
 def setup_magnetic_field_aliases(registry, ds_fields, ftype="gas"):
     unit_system = registry.ds.unit_system
+    ds_fields = [(registry.ds.dataset_type, fd) for fd in ds_fields]
     from_units = Unit(registry[ds_fields[0]].units,
                       registry=registry.ds.unit_registry)
     if dimensions.current_mks in unit_system.base_units:
@@ -134,5 +135,5 @@
         return _mag_field
     for ax, fd in zip("xyz", ds_fields):
         registry.add_field((ftype,"magnetic_field_%s" % ax),
-                           function=mag_field(ax, fd),
+                           function=mag_field(fd),
                            units=unit_system[to_units.dimensions])
\ No newline at end of file

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -126,6 +126,6 @@
         self.add_field(("gas","temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
-        setup_magnetic_field_aliases(self, [("athena","cell_centered_B_%s" % ax) for ax in "xyz"])
+        setup_magnetic_field_aliases(self, ["cell_centered_B_%s" % ax for ax in "xyz"])
 
 

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -48,9 +48,9 @@
         ("Y-momentum", (mom_units, ["momentum_y"], None)),
         ("Z-momentum", (mom_units, ["momentum_z"], None)),
         ("temperature", ("K", ["temperature"], None)),
-        ("X-magnfield", (b_units, ["magnetic_field_x"], None)),
-        ("Y-magnfield", (b_units, ["magnetic_field_y"], None)),
-        ("Z-magnfield", (b_units, ["magnetic_field_z"], None)),
+        ("X-magnfield", (b_units, [], None)),
+        ("Y-magnfield", (b_units, [], None)),
+        ("Z-magnfield", (b_units, [], None)),
         ("directrad-dedt-density", (eden_units, ["directrad-dedt-density"], None)),
         ("directrad-dpxdt-density", (mom_units, ["directrad-dpxdt-density"], None)),
         ("directrad-dpydt-density", (mom_units, ["directrad-dpydt-density"], None)),
@@ -79,6 +79,8 @@
     )
 
     def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
         unit_system = self.ds.unit_system
         def _thermal_energy_density(field, data):
             try:
@@ -146,6 +148,8 @@
         self.add_field(("gas", "temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
+        setup_magnetic_field_aliases(self, ["%s-magnfield" % ax for ax in "XYZ"])
+
 
 class ChomboPICFieldInfo3D(FieldInfoContainer):
     known_other_fields = (
@@ -299,9 +303,15 @@
         ("vx1", (vel_units, ["velocity_x"], None)),
         ("vx2", (vel_units, ["velocity_y"], None)),
         ("vx3", (vel_units, ["velocity_z"], None)),
-        ("bx1", (b_units, ["magnetic_field_x"], None)),
-        ("bx2", (b_units, ["magnetic_field_y"], None)),
-        ("bx3", (b_units, ["magnetic_field_z"], None)),
+        ("bx1", (b_units, [], None)),
+        ("bx2", (b_units, [], None)),
+        ("bx3", (b_units, [], None)),
     )
 
     known_particle_fields = ()
+
+    def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
+        setup_magnetic_field_aliases(self, ["bx%s" % ax for ax in [1,2,3]])
+

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -158,7 +158,7 @@
         if multi_species > 0 or dengo == 1:
             self.setup_species_fields()
         self.setup_energy_field()
-        setup_magnetic_field_aliases(self, [("enzo","B%s" % ax) for ax in "xyz"])
+        setup_magnetic_field_aliases(self, ["B%s" % ax for ax in "xyz"])
 
     def setup_energy_field(self):
         unit_system = self.ds.unit_system

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -162,6 +162,6 @@
         self.add_field(("gas","number_density"), function=_number_density,
                        units=unit_system["number_density"])
 
-        setup_magnetic_field_aliases(self, [("flash","mag%s" % ax) for ax in "xyz"])
+        setup_magnetic_field_aliases(self, ["mag%s" % ax for ax in "xyz"])
 
 

diff -r 03872e03e3a2c284056de13e00e2ea3a1d9b5ccb -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -33,3 +33,8 @@
         ("mag_field_z", ("gauss", ["magnetic_field_z"], None)),
     )
     known_particle_fields = ()
+
+    def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
+        setup_magnetic_field_aliases(self, ["magnetic_field_%s" % ax for ax in "xyz"])


https://bitbucket.org/yt_analysis/yt/commits/c1a8558b90ee/
Changeset:   c1a8558b90ee
Branch:      yt
User:        jzuhone
Date:        2015-12-08 23:10:46+00:00
Summary:     Adding new B-field alias stuff to the stream frontend.
Affected #:  1 file

diff -r 121bfa21899cad8717020c434e5bf7bb8c14bcd8 -r c1a8558b90eef428a9de917fda5fc5492473fe55 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -70,9 +70,12 @@
     )
 
     def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
         for field in self.ds.stream_handler.field_units:
             units = self.ds.stream_handler.field_units[field]
             if units != '': self.add_output_field(field, units=units)
+        setup_magnetic_field_aliases(self, ["magnetic_field_%s" % ax for ax in "xyz"])
 
     def add_output_field(self, name, **kwargs):
         if name in self.ds.stream_handler.field_units:


https://bitbucket.org/yt_analysis/yt/commits/3e1c1e723b95/
Changeset:   3e1c1e723b95
Branch:      yt
User:        jzuhone
Date:        2015-12-08 23:16:35+00:00
Summary:     Just pass in the correct ftype
Affected #:  7 files

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -114,9 +114,9 @@
     registry.add_field((ftype, "mach_alfven"), function=_mach_alfven,
                        units="dimensionless")
 
-def setup_magnetic_field_aliases(registry, ds_fields, ftype="gas"):
+def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"):
     unit_system = registry.ds.unit_system
-    ds_fields = [(registry.ds.dataset_type, fd) for fd in ds_fields]
+    ds_fields = [(ds_ftype, fd) for fd in ds_fields]
     from_units = Unit(registry[ds_fields[0]].units,
                       registry=registry.ds.unit_registry)
     if dimensions.current_mks in unit_system.base_units:

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -126,6 +126,6 @@
         self.add_field(("gas","temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
-        setup_magnetic_field_aliases(self, ["cell_centered_B_%s" % ax for ax in "xyz"])
+        setup_magnetic_field_aliases(self, "athena", ["cell_centered_B_%s" % ax for ax in "xyz"])
 
 

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -148,7 +148,7 @@
         self.add_field(("gas", "temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
-        setup_magnetic_field_aliases(self, ["%s-magnfield" % ax for ax in "XYZ"])
+        setup_magnetic_field_aliases(self, "chombo", ["%s-magnfield" % ax for ax in "XYZ"])
 
 
 class ChomboPICFieldInfo3D(FieldInfoContainer):
@@ -313,5 +313,5 @@
     def setup_fluid_fields(self):
         from yt.fields.magnetic_field import \
             setup_magnetic_field_aliases
-        setup_magnetic_field_aliases(self, ["bx%s" % ax for ax in [1,2,3]])
+        setup_magnetic_field_aliases(self, "chombo", ["bx%s" % ax for ax in [1,2,3]])
 

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -158,7 +158,7 @@
         if multi_species > 0 or dengo == 1:
             self.setup_species_fields()
         self.setup_energy_field()
-        setup_magnetic_field_aliases(self, ["B%s" % ax for ax in "xyz"])
+        setup_magnetic_field_aliases(self, "enzo", ["B%s" % ax for ax in "xyz"])
 
     def setup_energy_field(self):
         unit_system = self.ds.unit_system

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -162,6 +162,6 @@
         self.add_field(("gas","number_density"), function=_number_density,
                        units=unit_system["number_density"])
 
-        setup_magnetic_field_aliases(self, ["mag%s" % ax for ax in "xyz"])
+        setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"])
 
 

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -37,4 +37,4 @@
     def setup_fluid_fields(self):
         from yt.fields.magnetic_field import \
             setup_magnetic_field_aliases
-        setup_magnetic_field_aliases(self, ["magnetic_field_%s" % ax for ax in "xyz"])
+        setup_magnetic_field_aliases(self, "gdf", ["magnetic_field_%s" % ax for ax in "xyz"])

diff -r c1a8558b90eef428a9de917fda5fc5492473fe55 -r 3e1c1e723b95b62ece77f35365535df623c838b6 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -75,7 +75,7 @@
         for field in self.ds.stream_handler.field_units:
             units = self.ds.stream_handler.field_units[field]
             if units != '': self.add_output_field(field, units=units)
-        setup_magnetic_field_aliases(self, ["magnetic_field_%s" % ax for ax in "xyz"])
+        setup_magnetic_field_aliases(self, "stream", ["magnetic_field_%s" % ax for ax in "xyz"])
 
     def add_output_field(self, name, **kwargs):
         if name in self.ds.stream_handler.field_units:


https://bitbucket.org/yt_analysis/yt/commits/9298b1c17256/
Changeset:   9298b1c17256
Branch:      yt
User:        jzuhone
Date:        2015-12-08 23:22:46+00:00
Summary:     Bugfix
Affected #:  1 file

diff -r 3e1c1e723b95b62ece77f35365535df623c838b6 -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -129,7 +129,7 @@
         convert = lambda x: x.in_units(to_units)
     else:
         convert = lambda x: x.to_equivalent(to_units, equiv)
-    def mag_field(ax, fd):
+    def mag_field(fd):
         def _mag_field(field, data):
             return convert(data[fd])
         return _mag_field


https://bitbucket.org/yt_analysis/yt/commits/b660d9a31e92/
Changeset:   b660d9a31e92
Branch:      yt
User:        jzuhone
Date:        2015-12-09 04:43:50+00:00
Summary:     Merge
Affected #:  28 files

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -11,8 +11,8 @@
 with the path length of the ray through the cell.  Line profiles are 
 generated using a voigt profile based on the temperature field.  The lines 
 are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the line of sight peculiar velocity.  Inclusion of the 
-peculiar velocity requires setting ``get_los_velocity`` to True in the call to 
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
 The spectrum generator will output a file containing the wavelength and 

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -79,7 +79,7 @@
 
   lr.make_light_ray(seed=8675309,
                     fields=['temperature', 'density'],
-                    get_los_velocity=True)
+                    use_peculiar_velocity=True)
 
 The keyword arguments are:
 
@@ -107,8 +107,10 @@
 * ``data_filename`` (*string*): Path to output file for ray data.  
   Default: None.
 
-* ``get_los_velocity`` (*bool*): If True, the line of sight velocity is 
-  calculated for each point in the ray.  Default: True.
+* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from
+  the peculiar velocity of gas along the ray is calculated and added to the
+  cosmological redshift as the "effective" redshift.
+  Default: True.
 
 * ``redshift`` (*float*): Used with light rays made from single datasets to 
   specify a starting redshift for the ray.  If not used, the starting 

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -468,6 +468,19 @@
 
 All of these projections supply the data object as their base input.
 
+Often, it can be useful to sample a field at the minimum and maximum of a
+different field.  You can use the ``argmax`` and ``argmin`` operations to do
+this.::
+
+  reg.argmin("density", axis="temperature")
+
+This will return the temperature at the minimum density.
+
+If you don't specify an ``axis``, it will return the spatial position of
+the extremum value of the queried field.  Here is an example::
+
+  x, y, z = reg.argmin("density")
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -494,11 +507,15 @@
     | Usage: ``extrema(fields, non_zero=False)``
     | The extrema of a field or list of fields.
 
-**Maximum Location**
-    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
-    | Usage: ``max_location(fields)``
-    | The maximum of a field or list of fields as well
-      as the x,y,z location of that maximum.
+**Maximum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMaxFieldValues`
+    | Usage: ``sample_at_max_field_values(fields, sample_fields)``
+    | The value of sample_fields at the maximum value in fields.
+
+**Minimum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMinFieldValues`
+    | Usage: ``sample_at_min_field_values(fields, sample_fields)``
+    | The value of sample_fields at the minimum value in fields.
 
 **Minimum Location**
     | Class :class:`~yt.data_objects.derived_quantities.MinLocation`
@@ -506,6 +523,12 @@
     | The minimum of a field or list of fields as well
       as the x,y,z location of that minimum.
 
+**Maximum Location**
+    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
+    | Usage: ``max_location(fields)``
+    | The maximum of a field or list of fields as well
+      as the x,y,z location of that maximum.
+
 **Spin Parameter**
     | Class :class:`~yt.data_objects.derived_quantities.SpinParameter`
     | Usage: ``spin_parameter(use_gas=True, use_particles=True)``

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
+  "signature": "sha256:6d823c3543f4183db8d28ad5003183515a69ce533fcfff00d92db0372afc3930"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -529,8 +529,21 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
-      "\n",
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "HDF5"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write to HDF5, use `write_hdf5`:"
      ]
     },
@@ -591,6 +604,38 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., group within a group):"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "ASCII"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
      ]
     },

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -71,7 +71,6 @@
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=fields, setup_function=setup_ds,
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Create an AbsorptionSpectrum object extending from

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/cookbook/light_ray.py
--- a/doc/source/cookbook/light_ray.py
+++ b/doc/source/cookbook/light_ray.py
@@ -20,7 +20,6 @@
                   solution_filename='LR/lightraysolution.txt',
                   data_filename='LR/lightray.h5',
                   fields=['temperature', 'density'],
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Optionally, we can now overplot the part of this ray that intersects 

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -13,8 +13,7 @@
                   end_position=[1., 1., 1.],
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_los_velocity=True)
+                  fields=['temperature', 'density'])
 
 # Optionally, we can now overplot this ray on a projection of the source 
 # dataset

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -339,11 +339,11 @@
 .. code-block:: python
 
    #!python
-   class MaximumValue(AnswerTestingTest):
-       _type_name = "ParentageRelationships"
+   class MaximumValueTest(AnswerTestingTest):
+       _type_name = "MaximumValue"
        _attrs = ("field",)
        def __init__(self, ds_fn, field):
-           super(MaximumValue, self).__init__(ds_fn)
+           super(MaximumValueTest, self).__init__(ds_fn)
            self.field = field
 
        def run(self):
@@ -381,10 +381,10 @@
 * Typically for derived values, we compare to 10 or 12 decimal places.
   For exact values, we compare exactly.
 
-How to Add Data to the Testing Suite
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+How To Write Answer Tests for a Frontend
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To add data to the testing suite, first write a new set of tests for the data.
+To add a new frontend answer test, first write a new set of tests for the data.
 The Enzo example in ``yt/frontends/enzo/tests/test_outputs.py`` is
 considered canonical.  Do these things:
 
@@ -399,8 +399,13 @@
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
-    argument ``big_data=True`` if the test is expensive.
+    ``@requires_ds(test_dataset_name)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive. The 
+    ``test_dataset_name`` should be a string containing the path you would pass
+    to the ``yt.load`` function. It does not need to be the full path to the 
+    dataset, since the path will be automatically prepended with the location of
+    the test data directory.  See :ref:`configuration-file` for more information
+    about the ``test_data_dir`` configuration option.
 
   * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
     yield from to execute a bunch of standard tests. In addition we have created
@@ -408,7 +413,59 @@
     you should start, and then yield additional tests that stress the outputs in
     whatever ways are necessary to ensure functionality.
 
-  * **All tests should be yielded!**
-
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
+
+How to Write Image Comparison Tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have a number of tests designed to compare images as part of yt. We make use
+of some functionality from matplotlib to automatically compare images and detect
+differences, if any. Image comparison tests are used in the plotting and volume
+rendering machinery.
+
+The easiest way to use the image comparison tests is to make use of the 
+``GenericImageTest`` class. This class takes three arguments:
+
+* A dataset instance (e.g. something you load with ``yt.load`` or 
+  ``data_dir_load``) 
+* A function the test machinery can call which will save an image to disk. The 
+  test class will then find any images that get created and compare them with the
+  stored "correct" answer.
+* An integer specifying the number of decimal places to use when comparing
+  images. A smaller number of decimal places will produce a less stringent test.
+  Matplotlib uses an L2 norm on the full image to do the comparison tests, so
+  this is not a pixel-by-pixel measure, and surprisingly large variations will
+  still pass the test if the strictness of the comparison is not high enough.
+
+You *must* decorate your test function with ``requires_ds``, otherwise the 
+answer testing machinery will not be properly set up.
+
+Here is an example test function:
+
+.. code-block:: python
+
+   from yt.utilities.answer_testing.framework import \
+       GenericImageTest, requires_ds, data_dir_load
+
+   from matplotlib import pyplot as plt
+
+   @requires_ds(my_ds)
+   def test_my_ds():
+       ds = data_dir_load(my_ds)
+       def create_image(filename_prefix):
+           plt.plot([1, 2], [1, 2])
+           plt.savefig(filename_prefix)
+       test = GenericImageTest(ds, create_image, 12)
+       # this ensures a nice test name in nose's output
+       test_my_ds.__description__ = test.description
+       yield test_my_ds
+
+Another good example of an image comparison test is the
+``PlotWindowAttributeTest`` defined in the answer testing framework and used in
+``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
+test subclass can be used to programmatically test a variety of different methods
+of a complicated class using the same test class. This sort of image comparison
+test is more useful if you are finding yourself writing a ton of boilerplate
+code to get your image comparison test working.  The ``GenericImageTest`` is
+more useful if you only need to do a one-off image comparison test.

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -139,7 +139,9 @@
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
-           if True, include line of sight velocity for shifting lines.
+           if True, include peculiar velocity for calculating doppler redshift
+           to shift lines.  Requires similar flag to be set in LightRay 
+           generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -50,7 +50,6 @@
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -258,13 +258,13 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
-                       njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True, 
+                       redshift=None, njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
+                       use_peculiar_velocity=True, redshift=None,
                        njobs=-1)
 
         Create a light ray and get field values for each lixel.  A light
@@ -305,9 +305,10 @@
         data_filename : optional, string
             Path to output file for ray data.
             Default: None.
-        get_los_velocity : optional, bool
-            If True, the line of sight velocity is calculated for
-            each point in the ray.
+        use_peculiar_velocity : optional, bool
+            If True, the peculiar velocity along the ray will be sampled for
+            calculating the effective redshift combining the cosmological
+            redshift and the doppler redshift.
             Default: True.
         redshift : optional, float
             Used with light rays made from single datasets to specify a
@@ -335,7 +336,7 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         Make a light ray from a single dataset:
 
@@ -349,9 +350,12 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         """
+        if get_los_velocity is not None:
+            use_peculiar_velocity = get_los_velocity
+            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
@@ -368,9 +372,10 @@
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
-        if get_los_velocity:
-            all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los', 'redshift_eff'])
+        if use_peculiar_velocity:
+            all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
+                               'velocity_los', 'redshift_eff', 
+                               'redshift_dopp'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -444,16 +449,43 @@
                 for field in data_fields:
                     sub_data[field].extend(sub_ray[field][asort])
 
-                if get_los_velocity:
-                    line_of_sight = sub_segment[1] - sub_segment[0]
+                if use_peculiar_velocity:
+                    line_of_sight = sub_segment[0] - sub_segment[1]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    # line of sight velocity is reversed relative to ray
-                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
-                                                     line_of_sight).sum(axis=1)[asort])
-                    del sub_vel
+                    # Line of sight velocity = vel_los
+                    sub_vel_los = (np.rollaxis(sub_vel, 1) * \
+                                   line_of_sight).sum(axis=1)
+                    sub_data['velocity_los'].extend(sub_vel_los[asort])
+
+                    # doppler redshift:
+                    # See https://en.wikipedia.org/wiki/Redshift and 
+                    # Peebles eqns: 5.48, 5.49
+
+                    # 1 + redshift_dopp = (1 + v*cos(theta)/c) / 
+                    # sqrt(1 - v**2/c**2)
+
+                    # where v is the peculiar velocity (ie physical velocity
+                    # without the hubble flow, but no hubble flow in sim, so
+                    # just the physical velocity).
+
+                    # the bulk of the doppler redshift is from line of sight 
+                    # motion, but there is a small amount from time dilation 
+                    # of transverse motion, hence the inclusion of theta (the 
+                    # angle between line of sight and the velocity). 
+                    # theta is the angle between the ray vector (i.e. line of 
+                    # sight) and the velocity vectors: a dot b = ab cos(theta)
+
+                    sub_vel_mag = sub_ray['velocity_magnitude']
+                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    redshift_dopp = \
+                        (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
+                         np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
+                    sub_data['redshift_dopp'].extend(redshift_dopp[asort])
+                    del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
+                        redshift_dopp
 
                 sub_ray.clear_data()
                 del sub_ray, asort
@@ -461,34 +493,25 @@
             for key in sub_data:
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
-            # Get redshift for each lixel.  Assume linear relation between l and z.
+            # Get redshift for each lixel.  Assume linear relation between l 
+            # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                 (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
-            # When velocity_los is present, add effective redshift 
-            # (redshift_eff) field by combining cosmological redshift and 
+            # When using the peculiar velocity, create effective redshift 
+            # (redshift_eff) field combining cosmological redshift and 
             # doppler redshift.
             
-            # first convert los velocities to comoving frame (ie mult. by (1+z)), 
-            # then calculate doppler redshift:
-            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+            # then to add cosmological redshift and doppler redshifts, follow
+            # eqn 3.75 in Peacock's Cosmological Physics:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
 
-            # then to add cosmological redshift and doppler redshift, follow
-            # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
-            # Alternatively, see eqn 5.49 in Peebles for a similar result.
-            if get_los_velocity:
-
-                velocity_los_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_los']
-                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
-                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
-                                           (1 + sub_data['redshift'])) - 1
-                del velocity_los_cm, redshift_dopp
+            if use_peculiar_velocity:
+               sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
+                                            (1 + sub_data['redshift'])) - 1
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -123,7 +123,7 @@
     s_ds = halo.data_object.ds
     old_sphere = halo.data_object
     max_vals = old_sphere.quantities.max_location(field)
-    new_center = s_ds.arr(max_vals[2:])
+    new_center = s_ds.arr(max_vals[1:])
     new_sphere = s_ds.sphere(new_center.in_units("code_length"),
                                old_sphere.radius.in_units("code_length"))
     mylog.info("Moving sphere center from %s to %s." % (old_sphere.center,

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -87,7 +87,7 @@
         events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
                                           absorb_model=tbabs_model, 
                                           convolve_energies=True, prng=prng)
-
+        events1['xsky']
         return_events = return_data(events1.events)
 
         tests.append(GenericArrayTest(ds, return_events, args=[a]))

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -613,10 +613,84 @@
 
     # Numpy-like Operations
     def argmax(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is maximized.
+
+        This will, in a parallel-aware fashion, find the maximum value and then
+        return to you the values at that maximum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to maximize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_max_rho = reg.argmax("density", axis="temperature")
+        >>> max_rho_xyz = reg.argmax("density")
+        >>> t_mrho, v_mrho = reg.argmax("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmax("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.max_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_max_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def argmin(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is minimized.
+
+        This will, in a parallel-aware fashion, find the minimum value and then
+        return to you the values at that minimum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to minimize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_min_rho = reg.argmin("density", axis="temperature")
+        >>> min_rho_xyz = reg.argmin("density")
+        >>> t_mrho, v_mrho = reg.argmin("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmin("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.min_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_min_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def _compute_extrema(self, field):
         if self._extrema_cache is None:

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -522,10 +522,57 @@
         return [self.data_source.ds.arr([mis.min(), mas.max()])
                 for mis, mas in zip(values[::2], values[1::2])]
 
-class MaxLocation(DerivedQuantity):
+class SampleAtMaxFieldValues(DerivedQuantity):
     r"""
-    Calculates the maximum value plus the index, x, y, and z position
-    of the maximum.
+    Calculates the maximum value and returns whichever fields are asked to be
+    sampled.
+
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the minimum value.
+
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_max_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def count_values(self, field, sample_fields):
+        # field itself, then index, then the number of sample fields
+        self.num_vals = 1 + len(sample_fields)
+
+    def __call__(self, field, sample_fields):
+        rv = super(SampleAtMaxFieldValues, self).__call__(field, sample_fields)
+        if len(rv) == 1: rv = rv[0]
+        return rv
+
+    def process_chunk(self, data, field, sample_fields):
+        field = data._determine_fields(field)[0]
+        ma = array_like_field(data, -HUGE, field)
+        vals = [array_like_field(data, -1, sf) for sf in sample_fields]
+        maxi = -1
+        if data[field].size > 0:
+            maxi = self._func(data[field])
+            ma = data[field][maxi]
+            vals = [data[sf][maxi] for sf in sample_fields]
+        return (ma,) + tuple(vals)
+
+    def reduce_intermediate(self, values):
+        i = self._func(values[0]) # ma is values[0]
+        return [val[i] for val in values]
+
+    def _func(self, arr):
+        return np.argmax(arr)
+
+class MaxLocation(SampleAtMaxFieldValues):
+    r"""
+    Calculates the maximum value plus the x, y, and z position of the maximum.
 
     Parameters
     ----------
@@ -540,36 +587,39 @@
     >>> print ad.quantities.max_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MaxLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MaxLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, -HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        maxi = -1
-        if data[field].size > 0:
-            maxi = np.argmax(data[field])
-            ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in position_fields]
-        return (ma, maxi, mx, my, mz)
+class SampleAtMinFieldValues(SampleAtMaxFieldValues):
+    r"""
+    Calculates the minimum value and returns whichever fields are asked to be
+    sampled.
 
-    def reduce_intermediate(self, values):
-        i = np.argmax(values[0]) # ma is values[0]
-        return [val[i] for val in values]
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the minimum value.
 
-class MinLocation(DerivedQuantity):
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_min_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def _func(self, arr):
+        return np.argmin(arr)
+
+class MinLocation(SampleAtMinFieldValues):
     r"""
-    Calculates the minimum value plus the index, x, y, and z position
-    of the minimum.
+    Calculates the minimum value plus the x, y, and z position of the minimum.
 
     Parameters
     ----------
@@ -584,32 +634,12 @@
     >>> print ad.quantities.min_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MinLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MinLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        mini = -1
-        if data[field].size > 0:
-            mini = np.argmin(data[field])
-            ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in position_fields]
-        return (ma, mini, mx, my, mz)
-
-    def reduce_intermediate(self, values):
-        i = np.argmin(values[0]) # ma is values[0]
-        return [val[i] for val in values]
-
 class SpinParameter(DerivedQuantity):
     r"""
     Calculates the dimensionless spin parameter.

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -632,7 +632,7 @@
         """
         mylog.debug("Searching for maximum value of %s", field)
         source = self.all_data()
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
@@ -644,7 +644,7 @@
         """
         mylog.debug("Searching for minimum value of %s", field)
         source = self.all_data()
-        min_val, maxi, mx, my, mz = \
+        min_val, mx, my, mz = \
             source.quantities.min_location(field)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
               min_val, mx, my, mz)

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -55,6 +55,68 @@
                         ad["cell_mass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
 
+def test_max_location():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
+        ad = ds.all_data()
+
+        mv, x, y, z = ad.quantities.max_location(("gas", "density"))
+
+        yield assert_equal, mv, ad["density"].max()
+
+        mi = np.argmax(ad["density"])
+
+        yield assert_equal, ad["x"][mi], x
+        yield assert_equal, ad["y"][mi], y
+        yield assert_equal, ad["z"][mi], z
+
+def test_min_location():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
+        ad = ds.all_data()
+
+        mv, x, y, z = ad.quantities.min_location(("gas", "density"))
+
+        yield assert_equal, mv, ad["density"].min()
+
+        mi = np.argmin(ad["density"])
+
+        yield assert_equal, ad["x"][mi], x
+        yield assert_equal, ad["y"][mi], y
+        yield assert_equal, ad["z"][mi], z
+
+def test_sample_at_min_field_values():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs,
+            fields = ("density", "temperature", "velocity_x"))
+        ad = ds.all_data()
+
+        mv, temp, vm = ad.quantities.sample_at_min_field_values(
+            "density", ["temperature", "velocity_x"])
+
+        yield assert_equal, mv, ad["density"].min()
+
+        mi = np.argmin(ad["density"])
+
+        yield assert_equal, ad["temperature"][mi], temp
+        yield assert_equal, ad["velocity_x"][mi], vm
+
+def test_sample_at_max_field_values():
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(16, nprocs = nprocs,
+            fields = ("density", "temperature", "velocity_x"))
+        ad = ds.all_data()
+
+        mv, temp, vm = ad.quantities.sample_at_max_field_values(
+            "density", ["temperature", "velocity_x"])
+
+        yield assert_equal, mv, ad["density"].max()
+
+        mi = np.argmax(ad["density"])
+
+        yield assert_equal, ad["temperature"][mi], temp
+        yield assert_equal, ad["velocity_x"][mi], vm
+
 if __name__ == "__main__":
     for i in test_extrema():
         i[0](*i[1:])

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -1,4 +1,5 @@
 from yt.testing import fake_random_ds, fake_amr_ds, assert_equal
+import numpy as np
 
 
 def setup():
@@ -93,3 +94,51 @@
         qrho, qtemp = ad.min(["density", "temperature"])
         yield assert_equal, qrho, ad["density"].min()
         yield assert_equal, qtemp, ad["temperature"].min()
+
+def test_argmin():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.argmin("density", axis=["density"])
+        yield assert_equal, q, ad["density"].min()
+
+        q1, q2 = ad.argmin("density", axis=["density", "temperature"])
+        mi = np.argmin(ad["density"])
+        yield assert_equal, q1, ad["density"].min()
+        yield assert_equal, q2, ad["temperature"][mi]
+
+        pos = ad.argmin("density")
+        mi = np.argmin(ad["density"])
+        yield assert_equal, pos[0], ad["x"][mi]
+        yield assert_equal, pos[1], ad["y"][mi]
+        yield assert_equal, pos[2], ad["z"][mi]
+
+def test_argmax():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.argmax("density", axis=["density"])
+        yield assert_equal, q, ad["density"].max()
+
+        q1, q2 = ad.argmax("density", axis=["density", "temperature"])
+        mi = np.argmax(ad["density"])
+        yield assert_equal, q1, ad["density"].max()
+        yield assert_equal, q2, ad["temperature"][mi]
+
+        pos = ad.argmax("density")
+        mi = np.argmax(ad["density"])
+        yield assert_equal, pos[0], ad["x"][mi]
+        yield assert_equal, pos[1], ad["y"][mi]
+        yield assert_equal, pos[2], ad["z"][mi]

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -199,7 +199,7 @@
         if finest_levels is not False:
             source.min_level = self.max_level - finest_levels
         mylog.debug("Searching for maximum value of %s", field)
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
                    max_val, mx, my, mz)

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -293,7 +293,7 @@
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
         #self._conversion_override = conversion_override
-
+        self.fluid_types += ("stream",)
         self.geometry = geometry
         self.stream_handler = stream_handler
         name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
@@ -376,7 +376,7 @@
         if len(s) == 1:
             field = ("io", k)
         elif len(s) == 3:
-            field = ("gas", k)
+            field = ("stream", k)
         elif len(s) == 0:
             continue
         else:
@@ -502,7 +502,7 @@
         elif len(data[field].shape) in (1, 2):
             new_field = ("io", field)
         elif len(data[field].shape) == 3:
-            new_field = ("gas", field)
+            new_field = ("stream", field)
         else:
             raise RuntimeError
         new_data[new_field] = data[field]

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -19,54 +19,54 @@
 
 class StreamFieldInfo(FieldInfoContainer):
     known_other_fields = (
-        ("density", ("code_mass/code_length**3", [], None)),
-        ("dark_matter_density", ("code_mass/code_length**3", [], None)),
-        ("number_density", ("1/code_length**3", [], None)),
-        ("pressure", ("dyne/code_length**2", [], None)),
-        ("thermal_energy", ("erg / g", [], None)),
-        ("temperature", ("K", [], None)),
-        ("velocity_x", ("code_length/code_time", [], None)),
-        ("velocity_y", ("code_length/code_time", [], None)),
-        ("velocity_z", ("code_length/code_time", [], None)),
-        ("magnetic_field_x", ("gauss", [], None)),
-        ("magnetic_field_y", ("gauss", [], None)),
-        ("magnetic_field_z", ("gauss", [], None)),
-        ("radiation_acceleration_x", ("code_length/code_time**2", [], None)),
-        ("radiation_acceleration_y", ("code_length/code_time**2", [], None)),
-        ("radiation_acceleration_z", ("code_length/code_time**2", [], None)),
+        ("density", ("code_mass/code_length**3", ["density"], None)),
+        ("dark_matter_density", ("code_mass/code_length**3", ["dark_matter_density"], None)),
+        ("number_density", ("1/code_length**3", ["number_density"], None)),
+        ("pressure", ("dyne/code_length**2", ["pressure"], None)),
+        ("thermal_energy", ("erg / g", ["thermal_energy"], None)),
+        ("temperature", ("K", ["temperature"], None)),
+        ("velocity_x", ("code_length/code_time", ["velocity_x"], None)),
+        ("velocity_y", ("code_length/code_time", ["velocity_y"], None)),
+        ("velocity_z", ("code_length/code_time", ["velocity_z"], None)),
+        ("magnetic_field_x", ("gauss", ["magnetic_field_x"], None)),
+        ("magnetic_field_y", ("gauss", ["magnetic_field_y"], None)),
+        ("magnetic_field_z", ("gauss", ["magnetic_field_z"], None)),
+        ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
+        ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
+        ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
 
         # We need to have a bunch of species fields here, too
-        ("metal_density",   ("code_mass/code_length**3", [], None)),
-        ("hi_density",      ("code_mass/code_length**3", [], None)),
-        ("hii_density",     ("code_mass/code_length**3", [], None)),
-        ("h2i_density",     ("code_mass/code_length**3", [], None)),
-        ("h2ii_density",    ("code_mass/code_length**3", [], None)),
-        ("h2m_density",     ("code_mass/code_length**3", [], None)),
-        ("hei_density",     ("code_mass/code_length**3", [], None)),
-        ("heii_density",    ("code_mass/code_length**3", [], None)),
-        ("heiii_density",   ("code_mass/code_length**3", [], None)),
-        ("hdi_density",     ("code_mass/code_length**3", [], None)),
-        ("di_density",      ("code_mass/code_length**3", [], None)),
-        ("dii_density",     ("code_mass/code_length**3", [], None)),
+        ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),
+        ("hi_density",      ("code_mass/code_length**3", ["hi_density"], None)),
+        ("hii_density",     ("code_mass/code_length**3", ["hii_density"], None)),
+        ("h2i_density",     ("code_mass/code_length**3", ["h2i_density"], None)),
+        ("h2ii_density",    ("code_mass/code_length**3", ["h2ii_density"], None)),
+        ("h2m_density",     ("code_mass/code_length**3", ["h2m_density"], None)),
+        ("hei_density",     ("code_mass/code_length**3", ["hei_density"], None)),
+        ("heii_density",    ("code_mass/code_length**3", ["heii_density"], None)),
+        ("heiii_density",   ("code_mass/code_length**3", ["heiii_density"], None)),
+        ("hdi_density",     ("code_mass/code_length**3", ["hdi_density"], None)),
+        ("di_density",      ("code_mass/code_length**3", ["di_density"], None)),
+        ("dii_density",     ("code_mass/code_length**3", ["dii_density"], None)),
     )
 
     known_particle_fields = (
-        ("particle_position", ("code_length", [], None)),
-        ("particle_position_x", ("code_length", [], None)),
-        ("particle_position_y", ("code_length", [], None)),
-        ("particle_position_z", ("code_length", [], None)),
-        ("particle_velocity", ("code_length/code_time", [], None)),
-        ("particle_velocity_x", ("code_length/code_time", [], None)),
-        ("particle_velocity_y", ("code_length/code_time", [], None)),
-        ("particle_velocity_z", ("code_length/code_time", [], None)),
-        ("particle_index", ("", [], None)),
-        ("particle_gas_density", ("code_mass/code_length**3", [], None)),
-        ("particle_gas_temperature", ("K", [], None)),
-        ("particle_mass", ("code_mass", [], None)),
-        ("smoothing_length", ("code_length", [], None)),
-        ("density", ("code_mass/code_length**3", [], None)),
-        ("temperature", ("code_temperature", [], None)),
-        ("creation_time", ("code_time", [], None)),
+        ("particle_position", ("code_length", ["particle_position"], None)),
+        ("particle_position_x", ("code_length", ["particle_position_x"], None)),
+        ("particle_position_y", ("code_length", ["particle_position_y"], None)),
+        ("particle_position_z", ("code_length", ["particle_position_z"], None)),
+        ("particle_velocity", ("code_length/code_time", ["particle_velocity"], None)),
+        ("particle_velocity_x", ("code_length/code_time", ["particle_velocity_x"], None)),
+        ("particle_velocity_y", ("code_length/code_time", ["particle_velocity_y"], None)),
+        ("particle_velocity_z", ("code_length/code_time", ["particle_velocity_z"], None)),
+        ("particle_index", ("", ["particle_index"], None)),
+        ("particle_gas_density", ("code_mass/code_length**3", ["particle_gas_density"], None)),
+        ("particle_gas_temperature", ("K", ["particle_gas_temperature"], None)),
+        ("particle_mass", ("code_mass", ["particle_mass"], None)),
+        ("smoothing_length", ("code_length", ["smoothing_length"], None)),
+        ("density", ("code_mass/code_length**3", ["density"], None)),
+        ("temperature", ("code_temperature", ["temperature"], None)),
+        ("creation_time", ("code_time", ["creation_time"], None)),
     )
 
     def setup_fluid_fields(self):

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -44,7 +44,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype not in ("gas",) for ftype, fname in fields)):
+        if any((ftype not in self.ds.fluid_types for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -79,7 +79,7 @@
             source = self.all_data()
         mylog.debug("Searching %s grids for maximum value of %s",
                     len(source._grids), field)
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities["MaxLocation"]( field )
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -930,6 +930,12 @@
     yield assert_equal, warr, iarr
     yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
 
+    warr.write_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    giarr = YTArray.from_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    yield assert_equal, warr, giarr
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -736,8 +736,8 @@
     # End unit conversion methods
     #
 
-    def write_hdf5(self, filename, dataset_name=None, info=None):
-        r"""Writes ImageArray to hdf5 file.
+    def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
+        r"""Writes a YTArray to hdf5 file.
 
         Parameters
         ----------
@@ -750,16 +750,17 @@
         info: dictionary
             A dictionary of supplementary info to write to append as attributes
             to the dataset.
+            
+        group_name: string
+            An optional group to write the arrays to. If not specified, the arrays
+            are datasets at the top level by default.
 
         Examples
         --------
         >>> a = YTArray([1,2,3], 'cm')
-
         >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
-
         >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
         ...              info=myinfo)
-
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -773,8 +774,15 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        if dataset_name in f.keys():
-            d = f[dataset_name]
+        if group_name is not None:
+            if group_name in f:
+                g = f[group_name]
+            else:
+                g = f.create_group(group_name)
+        else:
+            g = f
+        if dataset_name in g.keys():
+            d = g[dataset_name]
             # Overwrite without deleting if we can get away with it.
             if d.shape == self.shape and d.dtype == self.dtype:
                 d[:] = self
@@ -782,16 +790,16 @@
                     del d.attrs[k]
             else:
                 del f[dataset_name]
-                d = f.create_dataset(dataset_name, data=self)
+                d = g.create_dataset(dataset_name, data=self)
         else:
-            d = f.create_dataset(dataset_name, data=self)
+            d = g.create_dataset(dataset_name, data=self)
 
         for k, v in info.items():
             d.attrs[k] = v
         f.close()
 
     @classmethod
-    def from_hdf5(cls, filename, dataset_name=None):
+    def from_hdf5(cls, filename, dataset_name=None, group_name=None):
         r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray.
 
         Parameters
@@ -803,6 +811,10 @@
             The name of the dataset to read from.  If the dataset has a units
             attribute, attempt to infer units as well.
 
+        group_name: string
+            An optional group to read the arrays from. If not specified, the arrays
+            are datasets at the top level by default.
+
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -811,7 +823,11 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        dataset = f[dataset_name]
+        if group_name is not None:
+            g = f[group_name]
+        else:
+            g = f
+        dataset = g[dataset_name]
         data = dataset[:]
         units = dataset.attrs.get('units', '')
         if 'unit_registry' in dataset.attrs.keys():

diff -r 9298b1c172561ce955cd60bd06e4b37335aa11b8 -r b660d9a31e926779087778c1b2bf37f780e14f5d yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -84,7 +84,7 @@
 
         arr[idx] = 0.6
 
-    data = dict(Density=arr)
+    data = dict(density=(arr, "g/cm**3"))
     ds = load_uniform_grid(data, arr.shape, bbox=bbox)
 
     return ds
@@ -96,7 +96,7 @@
 
     sc = Scene()
 
-    vol = VolumeSource(ds, field=('gas', 'Density'))
+    vol = VolumeSource(ds, field=('gas', 'density'))
 
     tf = vol.transfer_function
     tf = ColorTransferFunction((0.1, 1.0))


https://bitbucket.org/yt_analysis/yt/commits/8fb1df8031ab/
Changeset:   8fb1df8031ab
Branch:      yt
User:        jzuhone
Date:        2015-12-09 04:50:31+00:00
Summary:     Get rid of these aliases since we do it the new way now
Affected #:  1 file

diff -r b660d9a31e926779087778c1b2bf37f780e14f5d -r 8fb1df8031abd8c84b6d83f5d6ce3cc4233d01b2 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -28,9 +28,9 @@
         ("velocity_x", ("code_length/code_time", ["velocity_x"], None)),
         ("velocity_y", ("code_length/code_time", ["velocity_y"], None)),
         ("velocity_z", ("code_length/code_time", ["velocity_z"], None)),
-        ("magnetic_field_x", ("gauss", ["magnetic_field_x"], None)),
-        ("magnetic_field_y", ("gauss", ["magnetic_field_y"], None)),
-        ("magnetic_field_z", ("gauss", ["magnetic_field_z"], None)),
+        ("magnetic_field_x", ("gauss", [], None)),
+        ("magnetic_field_y", ("gauss", [], None)),
+        ("magnetic_field_z", ("gauss", [], None)),
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),


https://bitbucket.org/yt_analysis/yt/commits/2b8ee16dcdc6/
Changeset:   2b8ee16dcdc6
Branch:      yt
User:        jzuhone
Date:        2015-12-09 05:26:44+00:00
Summary:     Updating the magnetic field test
Affected #:  1 file

diff -r 8fb1df8031abd8c84b6d83f5d6ce3cc4233d01b2 -r 2b8ee16dcdc6d33b28e4521cd67a2675803afa5b yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -1,6 +1,7 @@
 import numpy as np
 from yt.utilities.physical_constants import mu_0
 from yt.testing import assert_almost_equal, fake_random_ds
+from yt.frontends.stream.api import load_uniform_grid
 
 def setup():
     from yt.config import ytcfg
@@ -8,40 +9,44 @@
 
 def test_magnetic_fields():
 
-    fields = ("magnetic_field_x", "magnetic_field_y", "magnetic_field_z")
-    cgs_units = ("gauss",)*3
-    mks_units = ("T",)*3
+    ddims = (16,16,16)
+    data1 = {"magnetic_field_x":(np.random.random(size=ddims),"T"),
+             "magnetic_field_y":(np.random.random(size=ddims),"T"),
+             "magnetic_field_z":(np.random.random(size=ddims),"T")}
+    data2 = {}
+    for field in data1:
+        data2[field] = (data1[field][0]*1.0e4, "gauss")
 
-    ds_cgs = fake_random_ds(16, fields=fields, units=cgs_units, nprocs=4)
-    ds_mks = fake_random_ds(16, fields=fields, units=mks_units, nprocs=4,
-                            unit_system="mks")
+    ds1 = load_uniform_grid(data1, ddims, unit_system="cgs")
+    ds2 = load_uniform_grid(data2, ddims, unit_system="mks")
 
-    ds_cgs.index
-    ds_mks.index
+    ds1.index
+    ds2.index
 
-    dd_cgs = ds_cgs.all_data()
-    dd_mks = ds_mks.all_data()
+    dd1 = ds1.all_data()
+    dd2 = ds2.all_data()
 
-    assert ds_cgs.field_info["gas","magnetic_field_strength"].units == "gauss"
-    assert ds_cgs.field_info["gas","magnetic_field_poloidal"].units == "gauss"
-    assert ds_cgs.field_info["gas","magnetic_field_toroidal"].units == "gauss"
-    assert ds_mks.field_info["gas","magnetic_field_strength"].units == "T"
-    assert ds_mks.field_info["gas","magnetic_field_poloidal"].units == "T"
-    assert ds_mks.field_info["gas","magnetic_field_toroidal"].units == "T"
+    assert ds1.fields.gas.magnetic_field_strength.units == "gauss"
+    assert ds1.fields.gas.magnetic_field_poloidal.units == "gauss"
+    assert ds1.fields.gas.magnetic_field_toroidal.units == "gauss"
+    assert ds2.fields.gas.magnetic_field_strength.units == "T"
+    assert ds2.fields.gas.magnetic_field_poloidal.units == "T"
+    assert ds2.fields.gas.magnetic_field_toroidal.units == "T"
 
-    emag_cgs = (dd_cgs["magnetic_field_x"]**2 +
-                dd_cgs["magnetic_field_y"]**2 +
-                dd_cgs["magnetic_field_z"]**2)/(8.0*np.pi)
-    emag_cgs.convert_to_units("dyne/cm**2")
+    emag1 = (dd1["magnetic_field_x"]**2 +
+             dd1["magnetic_field_y"]**2 +
+             dd1["magnetic_field_z"]**2)/(8.0*np.pi)
+    emag1.convert_to_units("dyne/cm**2")
 
-    emag_mks = (dd_mks["magnetic_field_x"]**2 +
-                dd_mks["magnetic_field_y"]**2 +
-                dd_mks["magnetic_field_z"]**2)/(2.0*mu_0)
-    emag_mks.convert_to_units("Pa")
+    emag2 = (dd2["magnetic_field_x"]**2 +
+             dd2["magnetic_field_y"]**2 +
+             dd2["magnetic_field_z"]**2)/(2.0*mu_0)
+    emag2.convert_to_units("Pa")
 
-    yield assert_almost_equal, emag_cgs, dd_cgs["magnetic_energy"]
-    yield assert_almost_equal, emag_mks, dd_mks["magnetic_energy"]
+    yield assert_almost_equal, emag1, dd1["magnetic_energy"]
+    yield assert_almost_equal, emag2, dd2["magnetic_energy"]
 
-    assert str(emag_cgs.units) == str(dd_cgs["magnetic_energy"].units)
-    assert str(emag_mks.units) == str(dd_mks["magnetic_energy"].units)
+    assert str(emag1.units) == str(dd1["magnetic_energy"].units)
+    assert str(emag2.units) == str(dd2["magnetic_energy"].units)
 
+    yield assert_almost_equal, emag1.in_cgs(), emag2.in_cgs()


https://bitbucket.org/yt_analysis/yt/commits/a2bc84de2c06/
Changeset:   a2bc84de2c06
Branch:      yt
User:        jzuhone
Date:        2015-12-09 16:42:37+00:00
Summary:     Always make this
Affected #:  1 file

diff -r 2b8ee16dcdc6d33b28e4521cd67a2675803afa5b -r a2bc84de2c063ac880b40780ea24870a35463f07 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -237,9 +237,8 @@
         self.set_units()
         self._setup_coordinate_handler()
 
-        if unit_system == "code":
-            create_code_unit_system(self)
-            unit_system = str(self)
+        create_code_unit_system(self)
+        unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 
         # Because we need an instantiated class to check the ds's existence in


https://bitbucket.org/yt_analysis/yt/commits/182c1bf6b420/
Changeset:   182c1bf6b420
Branch:      yt
User:        jzuhone
Date:        2015-12-09 16:56:28+00:00
Summary:     That was a mistake
Affected #:  1 file

diff -r a2bc84de2c063ac880b40780ea24870a35463f07 -r 182c1bf6b420b9e60370add05022c990c913dcfa yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -238,7 +238,8 @@
         self._setup_coordinate_handler()
 
         create_code_unit_system(self)
-        unit_system = str(self)
+        if unit_system == "code":
+            unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 
         # Because we need an instantiated class to check the ds's existence in


https://bitbucket.org/yt_analysis/yt/commits/312697c0bc6e/
Changeset:   312697c0bc6e
Branch:      yt
User:        jzuhone
Date:        2015-12-09 16:58:39+00:00
Summary:     Pass in a dataset to get the code units for that dataset returned
Affected #:  1 file

diff -r 182c1bf6b420b9e60370add05022c990c913dcfa -r 312697c0bc6e78eb5ddbda62be1524b498605492 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -420,7 +420,10 @@
         if unit_system == "cgs":
             return yt_base_unit
         else:
-            unit_system = unit_system_registry[unit_system]
+            if unit_system == "code":
+                raise RuntimeError("To convert to a \"code\" unit system, "
+                                   "specify a dataset with \"unit_system\" == ds!")
+            unit_system = unit_system_registry[str(unit_system)]
             units_string = self._get_system_unit_string(unit_system.base_units)
             u = Unit(units_string, registry=self.registry)
             base_value = get_conversion_factor(self, yt_base_unit)[0]


https://bitbucket.org/yt_analysis/yt/commits/c78de7235f90/
Changeset:   c78de7235f90
Branch:      yt
User:        jzuhone
Date:        2015-12-09 17:01:07+00:00
Summary:     Allow datasets to be passed in here as well, though not sure how well it would work
Affected #:  1 file

diff -r 312697c0bc6e78eb5ddbda62be1524b498605492 -r c78de7235f90a366b6f2e25466a9948fa4258f24 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -82,9 +82,7 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
-        if isinstance(unit_system, string_types):
-            unit_system = unit_system_registry[unit_system]
-        self.unit_system = unit_system
+        self.unit_system = unit_system_registry[str(unit_system)]
 
     def hubble_distance(self):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/dacc32bc3aab/
Changeset:   dacc32bc3aab
Branch:      yt
User:        jzuhone
Date:        2015-12-09 17:10:07+00:00
Summary:     This should fix allowing code-based unit systems in the Cosmology object
Affected #:  1 file

diff -r c78de7235f90a366b6f2e25466a9948fa4258f24 -r dacc32bc3aab1baeab998fa3bf6d1f7842dab9aa yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -72,6 +72,11 @@
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
+        self.unit_system = unit_system_registry[str(unit_system)]
+        # The following will only be true if the unit system is
+        # associated with a specific dataset
+        if self.unit_system.registry is not None:
+            unit_registry = self.unit_system.registry
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)
@@ -82,7 +87,6 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
-        self.unit_system = unit_system_registry[str(unit_system)]
 
     def hubble_distance(self):
         r"""
@@ -217,7 +221,7 @@
         """
         
         return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_units(self.unit_system["volume"])
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_units(self.unit_system["length"])
 
     def angular_scale(self, z_i, z_f):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/89573bd16f63/
Changeset:   89573bd16f63
Branch:      yt
User:        jzuhone
Date:        2015-12-09 17:45:05+00:00
Summary:     Make sure we can grab this from the top
Affected #:  1 file

diff -r dacc32bc3aab1baeab998fa3bf6d1f7842dab9aa -r 89573bd16f636a6ebe221b9f024d07fdd4917c68 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -178,3 +178,4 @@
     ortho_find, quartiles, periodic_position
 
 from yt.units.unit_systems import UnitSystem
+from yt.units.unit_object import unit_system_registry


https://bitbucket.org/yt_analysis/yt/commits/89c43906803e/
Changeset:   89c43906803e
Branch:      yt
User:        jzuhone
Date:        2015-12-10 16:06:33+00:00
Summary:     Bail out if we don't have magnetic fields in the dataset
Affected #:  1 file

diff -r 89573bd16f636a6ebe221b9f024d07fdd4917c68 -r 89c43906803e800fff2778fec33a67a47219fb64 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -117,6 +117,8 @@
 def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"):
     unit_system = registry.ds.unit_system
     ds_fields = [(ds_ftype, fd) for fd in ds_fields]
+    if ds_fields[0] not in registry:
+        return
     from_units = Unit(registry[ds_fields[0]].units,
                       registry=registry.ds.unit_registry)
     if dimensions.current_mks in unit_system.base_units:


https://bitbucket.org/yt_analysis/yt/commits/082ac82b9931/
Changeset:   082ac82b9931
Branch:      yt
User:        jzuhone
Date:        2015-12-11 04:29:57+00:00
Summary:     Adding geometrized and planck unit systems
Affected #:  4 files

diff -r 89c43906803e800fff2778fec33a67a47219fb64 -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -22,7 +22,8 @@
     kelvin_per_rankine, speed_of_light_cm_per_s, planck_length_cm, \
     planck_charge_esu, planck_energy_erg, planck_mass_grams, \
     planck_temperature_K, planck_time_s, mass_hydrogen_grams, \
-    grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm
+    grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm, \
+    newton_constant_cgs
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -142,6 +143,11 @@
     "q_pl": (planck_charge_esu, dimensions.charge_cgs, 0.0, r"q_{\rm{P}}"),
     "E_pl": (planck_energy_erg, dimensions.energy, 0.0, r"E_{\rm{P}}"),
 
+    # Geometrized units
+
+    "M_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
+    "L_geom": (newton_constant_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
+    "T_geom": (newton_constant_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
 }
 
 # This dictionary formatting from magnitude package, credit to Juan Reyero.

diff -r 89c43906803e800fff2778fec33a67a47219fb64 -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -84,6 +84,8 @@
 cgs_unit_system["pressure"] = "dyne/cm**2"
 cgs_unit_system["force"] = "dyne"
 cgs_unit_system["magnetic_field_cgs"] = "gauss"
+cgs_unit_system["charge_cgs"] = "esu"
+cgs_unit_system["current_cgs"] = "statA"
 
 mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian",
                              current_mks_unit="A")
@@ -92,6 +94,7 @@
 mks_unit_system["pressure"] = "Pa"
 mks_unit_system["force"] = "N"
 mks_unit_system["magnetic_field_mks"] = "T"
+cgs_unit_system["charge_mks"] = "C"
 
 imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", "R", "radian")
 imperial_unit_system["force"] = "lbf"
@@ -102,4 +105,10 @@
 galactic_unit_system["energy"] = "keV"
 galactic_unit_system["magnetic_field_cgs"] = "uG"
 
-solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr", "K", "radian")
\ No newline at end of file
+solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr", "K", "radian")
+
+geometrized_unit_system = UnitSystem("geometrized", "L_geom", "M_geom", "T_geom", "K", "radian")
+
+planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", "T_pl", "radian")
+cgs_unit_system["energy"] = "E_pl"
+cgs_unit_system["charge_cgs"] = "q_pl"

diff -r 89c43906803e800fff2778fec33a67a47219fb64 -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -21,7 +21,8 @@
     planck_energy_erg, \
     planck_charge_esu, \
     planck_temperature_K, \
-    standard_gravity_cm_per_s2
+    standard_gravity_cm_per_s2, \
+    newton_constant_cgs
 from yt.units.yt_array import YTQuantity
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
@@ -63,7 +64,7 @@
 kboltz = boltzmann_constant_cgs
 kb = kboltz
 
-gravitational_constant_cgs = YTQuantity(6.67384e-8, 'cm**3/g/s**2')
+gravitational_constant_cgs = YTQuantity(newton_constant_cgs, 'cm**3/g/s**2')
 gravitational_constant = gravitational_constant_cgs
 G = gravitational_constant_cgs
 

diff -r 89c43906803e800fff2778fec33a67a47219fb64 -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -73,6 +73,7 @@
 # velocities, accelerations
 speed_of_light_cm_per_s = 2.99792458e10
 standard_gravity_cm_per_s2 = 9.80665e2
+newton_constant_cgs = 6.67384e-8
 
 # temperature / energy
 boltzmann_constant_erg_per_K = 1.3806488e-16


https://bitbucket.org/yt_analysis/yt/commits/f0227f5d8feb/
Changeset:   f0227f5d8feb
Branch:      yt
User:        jzuhone
Date:        2015-12-11 04:40:15+00:00
Summary:     Fixing up planck units
Affected #:  3 files

diff -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 -r f0227f5d8febdac4a521377568f75e6049970cbc yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -23,7 +23,7 @@
     planck_charge_esu, planck_energy_erg, planck_mass_grams, \
     planck_temperature_K, planck_time_s, mass_hydrogen_grams, \
     grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm, \
-    newton_constant_cgs
+    newton_cgs
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -146,8 +146,8 @@
     # Geometrized units
 
     "M_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
-    "L_geom": (newton_constant_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
-    "T_geom": (newton_constant_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
+    "L_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
+    "T_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
 }
 
 # This dictionary formatting from magnitude package, credit to Juan Reyero.

diff -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 -r f0227f5d8febdac4a521377568f75e6049970cbc yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -22,7 +22,8 @@
     planck_charge_esu, \
     planck_temperature_K, \
     standard_gravity_cm_per_s2, \
-    newton_constant_cgs
+    newton_cgs, \
+    planck_cgs
 from yt.units.yt_array import YTQuantity
 
 mass_electron_cgs = YTQuantity(mass_electron_grams, 'g')
@@ -64,11 +65,11 @@
 kboltz = boltzmann_constant_cgs
 kb = kboltz
 
-gravitational_constant_cgs = YTQuantity(newton_constant_cgs, 'cm**3/g/s**2')
+gravitational_constant_cgs = YTQuantity(newton_cgs, 'cm**3/g/s**2')
 gravitational_constant = gravitational_constant_cgs
 G = gravitational_constant_cgs
 
-planck_constant_cgs = YTQuantity(6.62606957e-27, 'erg*s')
+planck_constant_cgs = YTQuantity(planck_cgs, 'erg*s')
 planck_constant = planck_constant_cgs
 hcgs = planck_constant_cgs
 hbar = 0.5*hcgs/pi

diff -r 082ac82b993161b0ad98e8c05c0bea5fcbf82576 -r f0227f5d8febdac4a521377568f75e6049970cbc yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -1,3 +1,4 @@
+import numpy as np
 #
 # Physical Constants and Units Conversion Factors
 #
@@ -73,7 +74,10 @@
 # velocities, accelerations
 speed_of_light_cm_per_s = 2.99792458e10
 standard_gravity_cm_per_s2 = 9.80665e2
-newton_constant_cgs = 6.67384e-8
+
+# some constants
+newton_cgs = 6.67384e-8
+planck_cgs = 6.62606957e-27
 
 # temperature / energy
 boltzmann_constant_erg_per_K = 1.3806488e-16
@@ -116,12 +120,13 @@
 TINY = 1.0e-40
 
 # Planck units
-planck_mass_grams = 2.17650925245e-05
-planck_length_cm = 1.6161992557e-33
+hbar_cgs = 0.5*planck_cgs/np.pi
+planck_mass_grams = np.sqrt(hbar_cgs*speed_of_light_cm_per_s/newton_cgs)
+planck_length_cm = np.sqrt(hbar_cgs*newton_cgs/speed_of_light_cm_per_s**3)
 planck_time_s = planck_length_cm / speed_of_light_cm_per_s
 planck_energy_erg = planck_mass_grams * speed_of_light_cm_per_s * speed_of_light_cm_per_s
 planck_temperature_K = planck_energy_erg / boltzmann_constant_erg_per_K
-planck_charge_esu = 5.62274532302e-09
+planck_charge_esu = np.sqrt(hbar_cgs*speed_of_light_cm_per_s)
 
 # Imperial and other non-metric units
 grams_per_pound = 453.59237


https://bitbucket.org/yt_analysis/yt/commits/bd77557e51bc/
Changeset:   bd77557e51bc
Branch:      yt
User:        jzuhone
Date:        2015-12-11 04:41:05+00:00
Summary:     No space here
Affected #:  1 file

diff -r f0227f5d8febdac4a521377568f75e6049970cbc -r bd77557e51bcb813efd273dac8d397b824192762 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -144,7 +144,6 @@
     "E_pl": (planck_energy_erg, dimensions.energy, 0.0, r"E_{\rm{P}}"),
 
     # Geometrized units
-
     "M_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
     "L_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
     "T_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),


https://bitbucket.org/yt_analysis/yt/commits/a4dc91a5c537/
Changeset:   a4dc91a5c537
Branch:      yt
User:        jzuhone
Date:        2015-12-11 04:53:58+00:00
Summary:     Give unit systems access to constants in their units
Affected #:  1 file

diff -r bd77557e51bcb813efd273dac8d397b824192762 -r a4dc91a5c537cb983cb0179a212906b10c71edb9 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -14,6 +14,20 @@
 from yt.extern.six import string_types
 from yt.units import dimensions
 from yt.units.unit_object import Unit, unit_system_registry
+from yt.utilities import physical_constants as pc
+
+class UnitSystemConstants(object):
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return "Physical constants in %s units." % self.name
+
+    def __str__(self):
+        return self.name
+
+    def __getattr__(self, item):
+        return getattr(pc, item).in_base(self.name)
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
@@ -33,6 +47,7 @@
         self.base_units = self.units_map.copy()
         unit_system_registry[name] = self
         self.name = name
+        self.constants = UnitSystemConstants(self.name)
 
     def __getitem__(self, key):
         if isinstance(key, string_types):


https://bitbucket.org/yt_analysis/yt/commits/c2df48e42ce5/
Changeset:   c2df48e42ce5
Branch:      yt
User:        jzuhone
Date:        2015-12-11 06:33:39+00:00
Summary:     Fixing broken tests
Affected #:  1 file

diff -r a4dc91a5c537cb983cb0179a212906b10c71edb9 -r c2df48e42ce521e150142830ab35ce2b59d168e2 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -72,11 +72,6 @@
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
-        self.unit_system = unit_system_registry[str(unit_system)]
-        # The following will only be true if the unit system is
-        # associated with a specific dataset
-        if self.unit_system.registry is not None:
-            unit_registry = self.unit_system.registry
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)
@@ -87,6 +82,12 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
+        # This is a hack, but we have problems with the unit registry
+        # if we don't do it
+        unit_system = unit_system_registry[str(unit_system)]
+        self.unit_system = {}
+        for dim in ["length","volume","density","time","angle","frequency"]:
+            self.unit_system[dim] = str(unit_system[dim])
 
     def hubble_distance(self):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/936191172b14/
Changeset:   936191172b14
Branch:      yt
User:        jzuhone
Date:        2015-12-11 20:22:03+00:00
Summary:     These should be lowercase in case we add T_geom later
Affected #:  2 files

diff -r c2df48e42ce521e150142830ab35ce2b59d168e2 -r 936191172b1437490a5c19fe47250cb0436809ea yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -144,9 +144,9 @@
     "E_pl": (planck_energy_erg, dimensions.energy, 0.0, r"E_{\rm{P}}"),
 
     # Geometrized units
-    "M_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
-    "L_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
-    "T_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
+    "m_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
+    "l_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
+    "t_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
 }
 
 # This dictionary formatting from magnitude package, credit to Juan Reyero.

diff -r c2df48e42ce521e150142830ab35ce2b59d168e2 -r 936191172b1437490a5c19fe47250cb0436809ea yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -122,7 +122,7 @@
 
 solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr", "K", "radian")
 
-geometrized_unit_system = UnitSystem("geometrized", "L_geom", "M_geom", "T_geom", "K", "radian")
+geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom", "K", "radian")
 
 planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", "T_pl", "radian")
 cgs_unit_system["energy"] = "E_pl"


https://bitbucket.org/yt_analysis/yt/commits/a012a2ef90ac/
Changeset:   a012a2ef90ac
Branch:      yt
User:        jzuhone
Date:        2015-12-12 14:37:39+00:00
Summary:     Fixing bugs
Affected #:  2 files

diff -r 936191172b1437490a5c19fe47250cb0436809ea -r a012a2ef90ac819b2c55313122f17a03d0dffe77 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -133,9 +133,8 @@
             if num_cells == 0:
                 continue
             vol = chunk["cell_volume"].in_cgs().v
-            EM = (chunk["density"]/mp).v**2
+            EM = (chunk["density"]/mp).in_cgs().v**2
             EM *= 0.5*(1.+self.X_H)*self.X_H*vol
-            EM.convert_to_cgs()
 
             if isinstance(self.Zmet, string_types):
                 metalZ = chunk[self.Zmet].v

diff -r 936191172b1437490a5c19fe47250cb0436809ea -r a012a2ef90ac819b2c55313122f17a03d0dffe77 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -17,7 +17,6 @@
 from yt.convenience import load
 from yt.testing import assert_almost_equal, requires_file
 from yt.config import ytcfg
-import numpy as np
 
 def test_unit_systems():
     goofy_unit_system = UnitSystem("goofy", "ly", "lbm", "hr", "R",


https://bitbucket.org/yt_analysis/yt/commits/81faae396527/
Changeset:   81faae396527
Branch:      yt
User:        jzuhone
Date:        2015-12-12 14:51:47+00:00
Summary:     Best if we avoid usage of in_units(unit_system[dim])
Affected #:  4 files

diff -r a012a2ef90ac819b2c55313122f17a03d0dffe77 -r 81faae396527efdce7850ce6bb5540c96616b370 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -20,8 +20,8 @@
 
 def get_radius(data, field_prefix):
     unit_system = data.ds.unit_system
-    center = data.get_field_parameter("center").in_units(unit_system["length"])
-    DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_units(unit_system["length"])
+    center = data.get_field_parameter("center").in_base(unit_system.name)
+    DW = (data.ds.domain_right_edge - data.ds.domain_left_edge).in_base(unit_system.name)
     # This is in cm**2 so it can be the destination for our r later.
     radius2 = data.ds.arr(np.zeros(data[field_prefix+"x"].shape,
                          dtype='float64'), 'cm**2')
@@ -31,7 +31,7 @@
     for i, ax in enumerate('xyz'):
         # This will coerce the units, so we don't need to worry that we copied
         # it from a cm**2 array.
-        np.subtract(data["%s%s" % (field_prefix, ax)].in_units(unit_system["length"]),
+        np.subtract(data["%s%s" % (field_prefix, ax)].in_base(unit_system.name),
                     center[i], r)
         if data.ds.periodicity[i] is True:
             np.abs(r, r)

diff -r a012a2ef90ac819b2c55313122f17a03d0dffe77 -r 81faae396527efdce7850ce6bb5540c96616b370 yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -107,7 +107,7 @@
         parameter.
         """
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_sph_r(coords), "code_length").in_units(unit_system["length"])
+        return data.ds.arr(get_sph_r(coords), "code_length").in_base(unit_system.name)
 
     registry.add_field(("index", "spherical_radius"),
                        function=_spherical_radius,
@@ -169,7 +169,7 @@
         """
         normal = data.get_field_parameter("normal")
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_units(unit_system["length"])
+        return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_base(unit_system.name)
 
     registry.add_field(("index", "cylindrical_radius"),
                        function=_cylindrical_radius,
@@ -194,7 +194,7 @@
         """
         normal = data.get_field_parameter("normal")
         coords = get_periodic_rvec(data)
-        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_units(unit_system["length"])
+        return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_base(unit_system.name)
 
     registry.add_field(("index", "cylindrical_z"),
                        function=_cylindrical_z,

diff -r a012a2ef90ac819b2c55313122f17a03d0dffe77 -r 81faae396527efdce7850ce6bb5540c96616b370 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -780,8 +780,8 @@
     def _vol_weight(field, data):
         pos = data[ptype, coord_name]
         pos = pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name].in_units(unit_system["mass"])
-        dens = data[ptype, density_name].in_units(unit_system["density"])
+        mass = data[ptype, mass_name].in_base(unit_system.name)
+        dens = data[ptype, density_name].in_base(unit_system.name)
         quan = data[ptype, smoothed_field]
         quan = quan.convert_to_units(field_units)
 
@@ -806,7 +806,7 @@
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
         # This should be used when seeking a non-normalized value:
-        rv /= hsml.uq**3 / hsml.uq.in_units(unit_system["length"]).uq**3
+        rv /= hsml.uq**3 / hsml.uq.in_base(unit_system.name).uq**3
         rv = data.apply_units(rv, field_units)
         return rv
     registry.add_field(field_name, function = _vol_weight,

diff -r a012a2ef90ac819b2c55313122f17a03d0dffe77 -r 81faae396527efdce7850ce6bb5540c96616b370 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -82,19 +82,14 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
-        # This is a hack, but we have problems with the unit registry
-        # if we don't do it
-        unit_system = unit_system_registry[str(unit_system)]
-        self.unit_system = {}
-        for dim in ["length","volume","density","time","angle","frequency"]:
-            self.unit_system[dim] = str(unit_system[dim])
+        self.unit_system = unit_system
 
     def hubble_distance(self):
         r"""
         The distance corresponding to c / h, where c is the speed of light 
         and h is the Hubble parameter in units of 1 / time.
         """
-        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_units(self.unit_system["length"])
+        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_base(self.unit_system)
 
     def comoving_radial_distance(self, z_i, z_f):
         r"""
@@ -116,7 +111,7 @@
         
         """
         return (self.hubble_distance() *
-                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_units(self.unit_system["length"])
+                trapzint(self.inverse_expansion_factor, z_i, z_f)).in_base(self.unit_system)
 
     def comoving_transverse_distance(self, z_i, z_f):
         r"""
@@ -142,13 +137,13 @@
             return (self.hubble_distance() / np.sqrt(self.omega_curvature) * 
                     np.sinh(np.sqrt(self.omega_curvature) * 
                             self.comoving_radial_distance(z_i, z_f) /
-                            self.hubble_distance())).in_units(self.unit_system["length"])
+                            self.hubble_distance())).in_base(self.unit_system)
         elif (self.omega_curvature < 0):
             return (self.hubble_distance() /
                     np.sqrt(np.fabs(self.omega_curvature)) * 
                     np.sin(np.sqrt(np.fabs(self.omega_curvature)) * 
                            self.comoving_radial_distance(z_i, z_f) /
-                           self.hubble_distance())).in_units(self.unit_system["length"])
+                           self.hubble_distance())).in_base(self.unit_system)
         else:
             return self.comoving_radial_distance(z_i, z_f)
 
@@ -183,7 +178,7 @@
                       np.sinh(np.fabs(self.omega_curvature) * 
                             self.comoving_transverse_distance(z_i, z_f) /
                             self.hubble_distance()) /
-                            np.sqrt(self.omega_curvature))).in_units(self.unit_system["volume"])
+                            np.sqrt(self.omega_curvature))).in_base(self.unit_system)
         elif (self.omega_curvature < 0):
              return (2 * np.pi * np.power(self.hubble_distance(), 3) /
                      np.fabs(self.omega_curvature) * 
@@ -195,11 +190,11 @@
                       np.arcsin(np.fabs(self.omega_curvature) * 
                            self.comoving_transverse_distance(z_i, z_f) /
                            self.hubble_distance()) /
-                      np.sqrt(np.fabs(self.omega_curvature)))).in_units(self.unit_system["volume"])
+                      np.sqrt(np.fabs(self.omega_curvature)))).in_base(self.unit_system)
         else:
              return (4 * np.pi *
                      np.power(self.comoving_transverse_distance(z_i, z_f), 3) /\
-                     3).in_units(self.unit_system["volume"])
+                     3).in_base(self.unit_system)
 
     def angular_diameter_distance(self, z_i, z_f):
         r"""
@@ -222,7 +217,7 @@
         """
         
         return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_units(self.unit_system["length"])
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_base(self.unit_system)
 
     def angular_scale(self, z_i, z_f):
         r"""
@@ -246,7 +241,7 @@
 
         scale = self.angular_diameter_distance(z_i, z_f) / \
           self.quan(1, "radian")
-        return scale.in_units(self.unit_system["length"]/self.unit_system["angle"])
+        return scale.in_base(self.unit_system)
     
     def luminosity_distance(self, z_i, z_f):
         r"""
@@ -269,7 +264,7 @@
         """
 
         return (self.comoving_transverse_distance(0, z_f) * (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_units(self.unit_system["length"])
+                self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_base(self.unit_system)
 
     def lookback_time(self, z_i, z_f):
         r"""
@@ -291,7 +286,7 @@
 
         """
         return (trapzint(self.age_integrand, z_i, z_f) / \
-                self.hubble_constant).in_units(self.unit_system["time"])
+                self.hubble_constant).in_base(self.unit_system)
     
     def hubble_time(self, z, z_inf=1e6):
         r"""
@@ -318,7 +313,7 @@
 
         """
         return (trapzint(self.age_integrand, z, z_inf) /
-                self.hubble_constant).in_units(self.unit_system["time"])
+                self.hubble_constant).in_base(self.unit_system)
 
     def critical_density(self, z):
         r"""
@@ -341,7 +336,7 @@
         return (3.0 / 8.0 / np.pi * 
                 self.hubble_constant**2 / G *
                 ((1 + z)**3.0 * self.omega_matter + 
-                 self.omega_lambda)).in_units(self.unit_system["density"])
+                 self.omega_lambda)).in_base(self.unit_system)
 
     def hubble_parameter(self, z):
         r"""
@@ -359,7 +354,7 @@
         >>> print co.hubble_parameter(1.0).in_units("km/s/Mpc")
 
         """
-        return self.hubble_constant.in_units(self.unit_system["frequency"]) * self.expansion_factor(z)
+        return self.hubble_constant.in_base(self.unit_system) * self.expansion_factor(z)
 
     def age_integrand(self, z):
         return (1 / (z + 1) / self.expansion_factor(z))
@@ -535,7 +530,7 @@
   
         my_time = t0 / self.hubble_constant
     
-        return my_time.in_units(self.unit_system["time"])
+        return my_time.in_base(self.unit_system)
 
     _arr = None
     @property


https://bitbucket.org/yt_analysis/yt/commits/089c5fdbf299/
Changeset:   089c5fdbf299
Branch:      yt
User:        jzuhone
Date:        2015-12-12 18:21:25+00:00
Summary:     Make specification of temperature and angle units optional, defaulting to "K" and "rad", respectively.
Affected #:  2 files

diff -r 81faae396527efdce7850ce6bb5540c96616b370 -r 089c5fdbf299534f853ae57df98e11718d0fd3cc yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -19,8 +19,8 @@
 from yt.config import ytcfg
 
 def test_unit_systems():
-    goofy_unit_system = UnitSystem("goofy", "ly", "lbm", "hr", "R",
-                                   "arcsec", current_mks_unit="mA")
+    goofy_unit_system = UnitSystem("goofy", "ly", "lbm", "hr", temperature_unit="R",
+                                   angle_unit="arcsec", current_mks_unit="mA")
     assert goofy_unit_system["temperature"] == Unit("R")
     assert goofy_unit_system[dimensions.solid_angle] == Unit("arcsec**2")
     assert goofy_unit_system["energy"] == Unit("lbm*ly**2/hr**2")

diff -r 81faae396527efdce7850ce6bb5540c96616b370 -r 089c5fdbf299534f853ae57df98e11718d0fd3cc yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -31,9 +31,13 @@
 
 class UnitSystem(object):
     def __init__(self, name, length_unit, mass_unit, time_unit,
-                 temperature_unit, angle_unit, current_mks_unit=None,
+                 temperature_unit=None, angle_unit=None, current_mks_unit=None,
                  registry=None):
         self.registry = registry
+        if temperature_unit is None:
+            temperature_unit = "K"
+        if angle_unit is None:
+            angle_unit = "rad"
         self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry),
                           dimensions.mass: Unit(mass_unit, registry=self.registry),
                           dimensions.time: Unit(time_unit, registry=self.registry),
@@ -90,10 +94,10 @@
 
 def create_code_unit_system(ds):
     code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
-                                  "code_temperature", "radian", registry=ds.unit_registry)
+                                  "code_temperature", registry=ds.unit_registry)
     code_unit_system["velocity"] = "code_velocity"
 
-cgs_unit_system = UnitSystem("cgs", "cm", "g", "s", "K", "radian")
+cgs_unit_system = UnitSystem("cgs", "cm", "g", "s")
 cgs_unit_system["energy"] = "erg"
 cgs_unit_system["specific_energy"] = "erg/g"
 cgs_unit_system["pressure"] = "dyne/cm**2"
@@ -102,8 +106,7 @@
 cgs_unit_system["charge_cgs"] = "esu"
 cgs_unit_system["current_cgs"] = "statA"
 
-mks_unit_system = UnitSystem("mks", "m", "kg", "s", "K", "radian",
-                             current_mks_unit="A")
+mks_unit_system = UnitSystem("mks", "m", "kg", "s", current_mks_unit="A")
 mks_unit_system["energy"] = "J"
 mks_unit_system["specific_energy"] = "J/kg"
 mks_unit_system["pressure"] = "Pa"
@@ -111,19 +114,19 @@
 mks_unit_system["magnetic_field_mks"] = "T"
 cgs_unit_system["charge_mks"] = "C"
 
-imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", "R", "radian")
+imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", temperature_unit="R")
 imperial_unit_system["force"] = "lbf"
 imperial_unit_system["energy"] = "ft*lbf"
 imperial_unit_system["pressure"] = "lbf/ft**2"
 
-galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr", "K", "radian")
+galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr")
 galactic_unit_system["energy"] = "keV"
 galactic_unit_system["magnetic_field_cgs"] = "uG"
 
-solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr", "K", "radian")
+solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr")
 
-geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom", "K", "radian")
+geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom")
 
-planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", "T_pl", "radian")
+planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", temperature_unit="T_pl")
 cgs_unit_system["energy"] = "E_pl"
 cgs_unit_system["charge_cgs"] = "q_pl"


https://bitbucket.org/yt_analysis/yt/commits/4b35d16f3256/
Changeset:   4b35d16f3256
Branch:      yt
User:        jzuhone
Date:        2015-12-12 19:19:02+00:00
Summary:     Fixing flake8
Affected #:  3 files

diff -r 089c5fdbf299534f853ae57df98e11718d0fd3cc -r 4b35d16f3256f3693ab84aff4637d9c8e6572bc6 yt/fields/tests/test_magnetic_fields.py
--- a/yt/fields/tests/test_magnetic_fields.py
+++ b/yt/fields/tests/test_magnetic_fields.py
@@ -1,6 +1,6 @@
 import numpy as np
 from yt.utilities.physical_constants import mu_0
-from yt.testing import assert_almost_equal, fake_random_ds
+from yt.testing import assert_almost_equal
 from yt.frontends.stream.api import load_uniform_grid
 
 def setup():

diff -r 089c5fdbf299534f853ae57df98e11718d0fd3cc -r 4b35d16f3256f3693ab84aff4637d9c8e6572bc6 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -29,7 +29,7 @@
     isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
     modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs
 
-from yt.units.unit_object import Unit, UnitParseError, unit_system_registry
+from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless, current_mks, em_dimensions
 from yt.utilities.exceptions import \

diff -r 089c5fdbf299534f853ae57df98e11718d0fd3cc -r 4b35d16f3256f3693ab84aff4637d9c8e6572bc6 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -17,14 +17,12 @@
 import functools
 import numpy as np
 
-from yt.extern.six import string_types
 from yt.units import dimensions
 from yt.units.unit_registry import \
      UnitRegistry
 from yt.units.yt_array import \
      YTArray, \
      YTQuantity
-from yt.units.unit_object import unit_system_registry
 
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs as G, \


https://bitbucket.org/yt_analysis/yt/commits/99984cd16b48/
Changeset:   99984cd16b48
Branch:      yt
User:        jzuhone
Date:        2015-12-14 23:32:15+00:00
Summary:     Revert unnecessary change
Affected #:  1 file

diff -r 4b35d16f3256f3693ab84aff4637d9c8e6572bc6 -r 99984cd16b487eaa036cce930095bf4c42678fed yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -710,7 +710,7 @@
             # setting plot aspect.
             if self.aspect is None:
                 self.aspect = float((self.ds.quan(1.0, unit_y) /
-                                     self.ds.quan(1.0, unit_x)))
+                                     self.ds.quan(1.0, unit_x)).in_cgs())
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -1672,7 +1672,7 @@
         self._cb_size = 0.0375*fsize
         self._ax_text_size = [1.2*fontscale, 0.9*fontscale]
         self._top_buff_size = 0.30*fontscale
-        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2]))
+        self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2])).in_cgs()
         self._unit_aspect = aspect
 
         size, axrect, caxrect = self._get_best_layout()


https://bitbucket.org/yt_analysis/yt/commits/f785bd83721b/
Changeset:   f785bd83721b
Branch:      yt
User:        jzuhone
Date:        2015-12-15 19:47:26+00:00
Summary:     Merge
Affected #:  20 files

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -22,4 +22,21 @@
 ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
-fbogert = fbogert at ucsc.edu
\ No newline at end of file
+fbogert = fbogert at ucsc.edu
+bwoshea = oshea at msu.edu
+mornkr at slac.stanford.edu = me at jihoonkim.org
+kbarrow = kssbarrow at gatech.edu
+kssbarrow at gmail.com = kssbarrow at gatech.edu
+kassbarrow at gmail.com = kssbarrow at gatech.edu
+antoine.strugarek at cea.fr = strugarek at astro.umontreal.ca
+rosen at ucolick.org = alrosen at ucsc.edu
+jzuhone = jzuhone at gmail.com
+karraki at nmsu.edu = karraki at gmail.com
+hckr at eml.cc = astrohckr at gmail.com
+julian3 at illinois.edu = astrohckr at gmail.com
+cosmosquark = bthompson2090 at gmail.com
+chris.m.malone at lanl.gov = chris.m.malone at gmail.com
+jnaiman at ucolick.org = jnaiman
+migueld.deval = miguel at archlinux.net
+slevy at ncsa.illinois.edu = salevy at illinois.edu
+malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -4,20 +4,30 @@
                 Tom Abel (tabel at stanford.edu)
                 Gabriel Altay (gabriel.altay at gmail.com)
                 Kenza Arraki (karraki at gmail.com)
+                Kirk Barrow (kssbarrow at gatech.edu)
+                Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
+                Bili Dong (qobilidop at gmail.com)
+                Nicholas Earl (nchlsearl at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
+                Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
+                Adam Ginsburg (keflavich at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
+                William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
+                Anni Järvenpää (anni.jarvenpaa at gmail.com)
+                Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
@@ -25,11 +35,15 @@
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
+                Meagan Lang (langmm.astro at gmail.com)
+                Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
+                Stuart Levy (salevy at illinois.edu)
                 Yuan Li (yuan at astro.columbia.edu)
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
+                Jonah Miller (jonah.maxwell.miller at gmail.com)
                 Chris Moody (cemoody at ucsc.edu)
                 Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
@@ -44,6 +58,7 @@
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
+                Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
@@ -59,6 +74,7 @@
                 Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
+                Robert Thompson (rthompsonj at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1344,7 +1344,8 @@
 
     def _retrieve_halos(self):
         # First get the halo particulars.
-        lines = file("%s.out" % self.basename)
+        with open("%s.out" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         # The location of particle data for each halo.
         locations = self._collect_halo_data_locations()
         halo = 0
@@ -1395,7 +1396,8 @@
 
     def _collect_halo_data_locations(self):
         # The halos are listed in order in the file.
-        lines = file("%s.txt" % self.basename)
+        with open("%s.txt" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         locations = []
         realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
@@ -1408,7 +1410,6 @@
                 item = item.split("/")
                 temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
-        lines.close()
         return locations
 
 class TextHaloList(HaloList):
@@ -1422,7 +1423,8 @@
 
     def _retrieve_halos(self, fname, columns, comment):
         # First get the halo particulars.
-        lines = file(fname)
+        with open(fname, 'r') as fh:
+            lines = fh.readlines()
         halo = 0
         base_set = ['x', 'y', 'z', 'r']
         keys = columns.keys()

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -17,9 +17,11 @@
 import numpy as np
 from functools import wraps
 import fileinput
+import io
 from re import finditer
 from tempfile import TemporaryFile
 import os
+import sys
 import zipfile
 
 from yt.config import ytcfg
@@ -592,20 +594,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        LE = self.left_edge - self.base_dds
-        RE = self.right_edge + self.base_dds
-        if not all(self.ds.periodicity):
-            for i in range(3):
-                if self.ds.periodicity[i]: continue
-                LE[i] = max(LE[i], self.ds.domain_left_edge[i])
-                RE[i] = min(RE[i], self.ds.domain_right_edge[i])
-        self._data_source = self.ds.region(self.center, LE, RE)
+        self._data_source = self.ds.region(self.center,
+            self.left_edge, self.right_edge)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
-        self._pdata_source = self.ds.region(self.center,
-            self.left_edge, self.right_edge)
-        self._pdata_source.min_level = 0
-        self._pdata_source.max_level = self.level
+        # This triggers "special" behavior in the RegionSelector to ensure we
+        # select *cells* whose bounding boxes overlap with our region, not just
+        # their cell centers.
+        self._data_source.loose_selection = True
 
     def get_data(self, fields = None):
         if fields is None: return
@@ -644,7 +640,7 @@
 
     def _fill_particles(self, part):
         for p in part:
-            self[p] = self._pdata_source[p]
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         fields = [f for f in fields if f not in self.field_data]
@@ -1278,14 +1274,13 @@
     def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr,
                            color_field_max, color_field_min, color_field,
                            emit_field_max, emit_field_min, emit_field): # this now holds for obj files
-        from sys import version
         if color_field is not None:
             if color_log: cs = np.log10(cs)
         if emit_field is not None:
             if emit_log: em = np.log10(em)
         if color_field is not None:
             if color_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 mi = cs.min()
@@ -1293,7 +1288,7 @@
                 mi = color_field_min
                 if color_log: mi = np.log10(mi)
             if color_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 ma = cs.max()
@@ -1311,7 +1306,7 @@
         # now, get emission
         if emit_field is not None:
             if emit_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 emi = em.min()
@@ -1319,7 +1314,7 @@
                 emi = emit_field_min
                 if emit_log: emi = np.log10(emi)
             if emit_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 ema = em.max()
@@ -1339,15 +1334,9 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        from sys import version
-        from io import IOBase
         if plot_index is None:
             plot_index = 0
-        if version < '3':
-            checker = file
-        else:
-            checker = IOBase
-        if isinstance(filename, checker):
+        if isinstance(filename, io.IOBase):
             fobj = filename + '.obj'
             fmtl = filename + '.mtl'
         else:
@@ -1639,7 +1628,7 @@
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
-        if isinstance(filename, file):
+        if isinstance(filename, io.IOBase):
             f = filename
         else:
             f = open(filename, "wb")

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -14,6 +14,7 @@
 from yt.extern.six import string_types
 import weakref
 
+from yt.extern.six import string_types
 from yt.utilities.exceptions import YTDimensionalityError
 
 class RegionExpression(object):

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1148,5 +1148,5 @@
     def _calculate_offsets(self, fields):
         pass
 
-    def __cmp__(self, other):
-        return cmp(self.filename, other.filename)
+    def __lt__(self, other):
+        return self.filename < other.filename

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -152,10 +152,7 @@
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
         self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
-        if PY2:
-            self._fhandle = file(self.index_filename,'rb')
-        else:
-            self._fhandle = open(self.index_filename,'rb')
+        self._fhandle = open(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
 
         self._fhandle.close()

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -375,7 +375,7 @@
         else:
             if self.parameters["OutputListOn"]:
                 a_values = [float(a) for a in 
-                           file(self.parameters["OutputListFilename"], "r").readlines()]
+                            open(self.parameters["OutputListFilename"], "r").readlines()]
             else:
                 a_values = [float(self.parameters["TimeOfFirstSnapshot"])]
                 time_max = float(self.parameters["TimeMax"])

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/frontends/owls_subfind/io.py
--- a/yt/frontends/owls_subfind/io.py
+++ b/yt/frontends/owls_subfind/io.py
@@ -42,7 +42,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda f: f.filename):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -76,7 +76,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda f: f.filename):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -38,6 +38,10 @@
 from .fields import \
     TipsyFieldInfo
 
+import sys
+if sys.version_info > (3,):
+    long = int
+
 class TipsyFile(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         # To go above 1 domain, we need to include an indexing step in the

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -16,6 +16,7 @@
 
 import errno
 from yt.extern.six import string_types
+from yt.extern.six.moves import input
 import time
 import inspect
 import traceback
@@ -586,7 +587,7 @@
         data = urllib.parse.urlencode(data)
     req = urllib.request.Request(uri, data)
     if use_pass:
-        username = raw_input("Bitbucket Username? ")
+        username = input("Bitbucket Username? ")
         password = getpass.getpass()
         upw = '%s:%s' % (username, password)
         req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
@@ -604,7 +605,7 @@
         print("*** is a delicate act, I require you to respond   ***")
         print("*** to the prompt with the word 'yes'.            ***")
         print()
-        response = raw_input("Do you want me to try to check it out? ")
+        response = input("Do you want me to try to check it out? ")
         if response != "yes":
             print()
             print("Okay, I understand.  You can check it out yourself.")

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -41,31 +41,27 @@
     cdef int maxn
     cdef int curn
     cdef bint periodicity[3]
-    cdef np.int64_t *doffs
-    cdef np.int64_t *pinds
-    cdef np.int64_t *pcounts
-    cdef np.float64_t *ppos
     # Note that we are preallocating here, so this is *not* threadsafe.
     cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds)
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
                              Oct **oct = ?, int extra_layer = ?)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
@@ -76,13 +72,13 @@
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
-                            np.int64_t *pcounts,
-                            np.int64_t *pinds,
-                            np.float64_t *ppos,
+                            np.int64_t[:] doffs,
+                            np.int64_t[:] pcounts,
+                            np.int64_t[:] pinds,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t* oct_left_edges,
-                            np.float64_t* oct_dds)
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields)

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -42,6 +42,7 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
+ at cython.initializedcheck(False)
 cdef np.float64_t r2dist(np.float64_t ppos[3],
                          np.float64_t cpos[3],
                          np.float64_t DW[3],
@@ -80,6 +81,7 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
+
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()
@@ -94,16 +96,17 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_octree(self, OctreeContainer mesh_octree,
-                     np.ndarray[np.int64_t, ndim=1] mdom_ind,
-                     np.ndarray[np.float64_t, ndim=2] positions,
-                     np.ndarray[np.float64_t, ndim=2] oct_positions,
+                     np.int64_t [:] mdom_ind,
+                     np.float64_t[:,:] positions,
+                     np.float64_t[:,:] oct_positions,
                      fields = None, int domain_id = -1,
                      int domain_offset = 0,
                      periodicity = (True, True, True),
                      index_fields = None,
                      OctreeContainer particle_octree = None,
-                     np.ndarray[np.int64_t, ndim=1] pdom_ind = None,
+                     np.int64_t [:] pdom_ind = None,
                      geometry = "cartesian"):
         # This will be a several-step operation.
         #
@@ -134,10 +137,10 @@
             pdom_ind = mdom_ind
         cdef int nf, i, j, n
         cdef int dims[3]
+        cdef np.float64_t[:] *field_check
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t **octree_field_pointers
         cdef int nsize = 0
@@ -146,15 +149,12 @@
         cdef Oct *oct
         cdef np.int64_t numpart, offset, local_ind, poff
         cdef np.int64_t moff_p, moff_m
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
-        cdef np.ndarray[np.int64_t, ndim=2] doff_m
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
+        cdef np.int64_t[:,:] doff_m
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=4] iarr
-        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
-        cdef np.ndarray[np.float64_t, ndim=2] oct_left_edges, oct_dds
+        cdef np.float64_t[:,:] cart_positions
+        cdef np.float64_t[:,:] oct_left_edges, oct_dds
         cdef OctInfo oinfo
         if geometry == "cartesian":
             self.pos_setup = cart_coord_setup
@@ -245,11 +245,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
-        pcounts = <np.int64_t*> pcount.data
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
@@ -263,10 +258,10 @@
             if offset < 0: continue
             nproc += 1
             self.neighbor_process(
-                dims, moi.left_edge, moi.dds, cart_pos, field_pointers, doffs,
-                &nind, pinds, pcounts, offset, index_field_pointers,
-                particle_octree, domain_id, &nsize, &oct_left_edges[0, 0],
-                &oct_dds[0, 0])
+                dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
+                &nind, pind, pcount, offset, index_field_pointers,
+                particle_octree, domain_id, &nsize, oct_left_edges,
+                oct_dds)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -275,6 +270,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_particles(self, OctreeContainer particle_octree,
                      np.ndarray[np.int64_t, ndim=1] pdom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
@@ -293,7 +289,6 @@
         cdef int dims[3]
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t pos[3]
         cdef np.float64_t **octree_field_pointers
@@ -304,10 +299,7 @@
         cdef Oct **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, local_ind
         cdef np.int64_t moff_p, moff_m, pind0, poff
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=2] cart_positions
         if geometry == "cartesian":
@@ -376,11 +368,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
-        pcounts = <np.int64_t*> pcount.data
         cdef int maxnei = 0
         cdef int nproc = 0
         for i in range(doff.shape[0]):
@@ -392,8 +379,8 @@
                 pind0 = pind[doff[i] + j]
                 for k in range(3):
                     pos[k] = positions[pind0, k]
-                self.neighbor_process_particle(pos, cart_pos, field_pointers,
-                            doffs, &nind, pinds, pcounts, pind0,
+                self.neighbor_process_particle(pos, cart_positions, field_pointers,
+                            doff, &nind, pind, pcount, pind0,
                             NULL, particle_octree, domain_id, &nsize)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
@@ -468,6 +455,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_grid(self, gobj,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):
@@ -524,16 +512,20 @@
         if self.curn < self.maxn:
             self.curn += 1
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
-                            np.int64_t *pcounts,
-                            np.int64_t *pinds,
-                            np.float64_t *ppos,
+                            np.int64_t[:] doffs,
+                            np.int64_t[:] pcounts,
+                            np.int64_t[:] pinds,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t *oct_left_edges,
-                            np.float64_t *oct_dds,
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds,
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -545,7 +537,7 @@
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != NULL and self.curn == self.maxn:
+            if oct_left_edges != None and self.curn == self.maxn:
                 r2_trunc = self.neighbors[self.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
@@ -555,8 +547,8 @@
                 r2 = 0.0
                 for k in range(3):
                     # We start at left edge, then do halfway, then right edge.
-                    ex[0] = oct_left_edges[3*nind[ni] + k]
-                    ex[1] = ex[0] + oct_dds[3*nind[ni] + k]
+                    ex[0] = oct_left_edges[nind[ni], k]
+                    ex[1] = ex[0] + oct_dds[nind[ni], k]
                     # There are three possibilities; we are between, left-of,
                     # or right-of the extrema.  Thanks to
                     # http://stackoverflow.com/questions/5254838/calculating-distance-between-a-point-and-a-rectangular-box-nearest-point
@@ -581,19 +573,23 @@
             for i in range(pc):
                 pn = pinds[offset + i]
                 for j in range(3):
-                    pos[j] = ppos[pn * 3 + j]
+                    pos[j] = ppos[pn, j]
                 self.neighbor_eval(pn, pos, cpos)
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t [:] doffs, np.int64_t **nind,
+                               np.int64_t [:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds):
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -626,11 +622,15 @@
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind,
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
@@ -649,7 +649,7 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, NULL, NULL)
+                           opos, None, None)
         self.process(offset, i, j, k, dim, opos, fields, index_fields)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
@@ -686,6 +686,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -744,6 +745,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -777,6 +779,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -812,6 +815,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -832,6 +836,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -804,6 +804,7 @@
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef np.float64_t right_edge_shift[3]
+    cdef bint loose_selection
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -818,6 +819,9 @@
         cdef np.ndarray[np.float64_t, ndim=1] DRE = _ensure_code(dobj.ds.domain_right_edge)
         cdef np.float64_t region_width[3]
         cdef bint p[3]
+        # This is for if we want to include zones that overlap and whose
+        # centers are not strictly included.
+        self.loose_selection = getattr(dobj, "loose_selection", False)
 
         for i in range(3):
             region_width[i] = RE[i] - LE[i]
@@ -877,6 +881,13 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef np.float64_t left_edge[3], right_edge[3]
+        cdef int i
+        if self.loose_selection:
+            for i in range(3):
+                left_edge[i] = pos[i] - dds[i]*0.5
+                right_edge[i] = pos[i] + dds[i]*0.5
+            return self.select_bbox(left_edge, right_edge)
         return self.select_point(pos)
 
     @cython.boundscheck(False)

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -38,7 +38,7 @@
     update_hg, \
     enable_plugins
 from yt.extern.six import add_metaclass, string_types
-from yt.extern.six.moves import urllib
+from yt.extern.six.moves import urllib, input
 from yt.convenience import load
 from yt.visualization.plot_window import \
     SlicePlot, \
@@ -325,7 +325,7 @@
     if not os.path.exists(date_file):
         print("Could not determine when yt stack was last updated.")
         return
-    print("".join(file(date_file, 'r').readlines()))
+    print("".join(open(date_file, 'r').readlines()))
     print("To update all dependencies, run \"yt update --all\".")
 
 def _update_yt_stack(path):
@@ -345,7 +345,7 @@
     print()
     print("[hit enter to continue or Ctrl-C to stop]")
     try:
-        raw_input()
+        input()
     except:
         sys.exit(0)
     os.environ["REINST_YT"] = "1"
@@ -381,7 +381,7 @@
         data = urllib.parse.urlencode(data)
     req = urllib.request.Request(uri, data)
     if use_pass:
-        username = raw_input("Bitbucket Username? ")
+        username = input("Bitbucket Username? ")
         password = getpass.getpass()
         upw = '%s:%s' % (username, password)
         req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
@@ -419,7 +419,7 @@
         print("   http://yt-project.org/irc.html")
         print("   http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org")
         print()
-        summary = raw_input("Press <enter> if you remain firm in your conviction to continue.")
+        summary = input("Press <enter> if you remain firm in your conviction to continue.")
         print()
         print()
         print("Okay, sorry about that. How about a nice, pithy ( < 12 words )")
@@ -430,7 +430,7 @@
             current_version = get_yt_version()
         except:
             current_version = "Unavailable"
-        summary = raw_input("Summary? ")
+        summary = input("Summary? ")
         bugtype = "bug"
         data = dict(title = summary, type=bugtype)
         print()
@@ -442,7 +442,7 @@
         if "EDITOR" in os.environ:
             print()
             print("Press enter to spawn your editor, %s" % os.environ["EDITOR"])
-            raw_input()
+            input()
             tf = tempfile.NamedTemporaryFile(delete=False)
             fn = tf.name
             tf.close()
@@ -463,7 +463,7 @@
             print()
             lines = []
             while 1:
-                line = raw_input()
+                line = input()
                 if line.strip() == "---": break
                 lines.append(line)
             content = "\n".join(lines)
@@ -487,7 +487,7 @@
         print("'submit'.  Next we'll ask for your Bitbucket Username.")
         print("If you don't have one, run the 'yt bootstrap_dev' command.")
         print()
-        raw_input()
+        input()
         retval = bb_apicall(endpoint, data, use_pass=True)
         import json
         retval = json.loads(retval)
@@ -528,17 +528,17 @@
         print()
         print("What username would you like to go by?")
         print()
-        username = raw_input("Username? ")
+        username = input("Username? ")
         if len(username) == 0: sys.exit(1)
         print()
         print("To start out, what's your name?")
         print()
-        name = raw_input("Name? ")
+        name = input("Name? ")
         if len(name) == 0: sys.exit(1)
         print()
         print("And your email address?")
         print()
-        email = raw_input("Email? ")
+        email = input("Email? ")
         if len(email) == 0: sys.exit(1)
         print()
         print("Please choose a password:")
@@ -554,12 +554,12 @@
         print("Would you like a URL displayed for your user?")
         print("Leave blank if no.")
         print()
-        url = raw_input("URL? ")
+        url = input("URL? ")
         print()
         print("Okay, press enter to register.  You should receive a welcome")
         print("message at %s when this is complete." % email)
         print()
-        raw_input()
+        input()
         data = dict(name = name, email = email, username = username,
                     password = password1, password2 = password2,
                     url = url, zap = "rowsdower")

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -798,6 +798,8 @@
     cdef int offsets[3][3]
     cdef np.int64_t off
     for i in range(3):
+        # Offsets here is a way of accounting for periodicity.  It keeps track
+        # of how to offset our grid as we loop over the icoords.
         dim[i] = output_fields[0].shape[i]
         offsets[i][0] = offsets[i][2] = 0
         offsets[i][1] = 1
@@ -815,27 +817,36 @@
                 if offsets[0][wi] == 0: continue
                 off = (left_index[0] + level_dims[0]*(wi-1))
                 iind[0] = ipos[i, 0] * rf - off
+                # rf here is the "refinement factor", or, the number of zones
+                # that this zone could potentially contribute to our filled
+                # grid.
                 for oi in range(rf):
                     # Now we need to apply our offset
                     oind[0] = oi + iind[0]
-                    if oind[0] < 0 or oind[0] >= dim[0]:
+                    if oind[0] < 0:
                         continue
+                    elif oind[0] >= dim[0]:
+                        break
                     for wj in range(3):
                         if offsets[1][wj] == 0: continue
                         off = (left_index[1] + level_dims[1]*(wj-1))
                         iind[1] = ipos[i, 1] * rf - off
                         for oj in range(rf):
                             oind[1] = oj + iind[1]
-                            if oind[1] < 0 or oind[1] >= dim[1]:
+                            if oind[1] < 0:
                                 continue
+                            elif oind[1] >= dim[1]:
+                                break
                             for wk in range(3):
                                 if offsets[2][wk] == 0: continue
                                 off = (left_index[2] + level_dims[2]*(wk-1))
                                 iind[2] = ipos[i, 2] * rf - off
                                 for ok in range(rf):
                                     oind[2] = ok + iind[2]
-                                    if oind[2] < 0 or oind[2] >= dim[2]:
+                                    if oind[2] < 0:
                                         continue
+                                    elif oind[2] >= dim[2]:
+                                        break
                                     ofield[oind[0], oind[1], oind[2]] = \
                                         ifield[i]
                                     tot += 1

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/utilities/poster/encode.py
--- a/yt/utilities/poster/encode.py
+++ b/yt/utilities/poster/encode.py
@@ -114,11 +114,11 @@
                 except:
                     raise ValueError("Could not determine filesize")
 
-    def __cmp__(self, other):
+    def __lt__(self, other):
         attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
         myattrs = [getattr(self, a) for a in attrs]
         oattrs = [getattr(other, a) for a in attrs]
-        return cmp(myattrs, oattrs)
+        return myattrs < oattrs
 
     def reset(self):
         if self.fileobj is not None:

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/utilities/pyparselibconfig/libconfig.py
--- a/yt/utilities/pyparselibconfig/libconfig.py
+++ b/yt/utilities/pyparselibconfig/libconfig.py
@@ -74,7 +74,7 @@
         return eval(v)
 
     def write(self, filename):
-        f = file(filename, 'w')
+        f = open(filename, 'w')
 
         self.write_dict(f, self, 0)
 

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -292,7 +292,7 @@
             self.load_memmaps()
 
     def write(self, filename):
-        f = file(filename, 'w')
+        f = open(filename, 'w')
         f.write("# SDF 1.0\n")
         f.write("parameter byteorder = %s;\n" % (self.parameters['byteorder']))
         for c in self.comments:

diff -r 99984cd16b487eaa036cce930095bf4c42678fed -r f785bd83721bae6f4483f52189b4de65a0ad084f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -520,7 +520,7 @@
         """
 
         dtheta = (1.0*theta)/n_steps
-        for i in xrange(n_steps):
+        for i in range(n_steps):
             self.rotate(dtheta, rot_vector=rot_vector, rot_center=rot_center)
             yield i
 
@@ -561,7 +561,7 @@
             dx = position_diff**(1.0/n_steps)
         else:
             dx = (final - self.position)*1.0/n_steps
-        for i in xrange(n_steps):
+        for i in range(n_steps):
             if exponential:
                 self.set_position(self.position * dx)
             else:
@@ -621,7 +621,7 @@
 
         """
         f = final**(1.0/n_steps)
-        for i in xrange(n_steps):
+        for i in range(n_steps):
             self.zoom(f)
             yield i
 


https://bitbucket.org/yt_analysis/yt/commits/983fe96985a6/
Changeset:   983fe96985a6
Branch:      yt
User:        jzuhone
Date:        2015-12-17 18:53:41+00:00
Summary:     Remove this duplicate import
Affected #:  1 file

diff -r f785bd83721bae6f4483f52189b4de65a0ad084f -r 983fe96985a68eddac8a8fa333c8678e872b272d yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -11,7 +11,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.extern.six import string_types
 import weakref
 
 from yt.extern.six import string_types


https://bitbucket.org/yt_analysis/yt/commits/3430f9329ab4/
Changeset:   3430f9329ab4
Branch:      yt
User:        jzuhone
Date:        2015-12-18 01:44:49+00:00
Summary:     Bugfixes
Affected #:  1 file

diff -r 983fe96985a68eddac8a8fa333c8678e872b272d -r 3430f9329ab4fc15debc6237c233235f3d43aab1 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -112,7 +112,7 @@
 mks_unit_system["pressure"] = "Pa"
 mks_unit_system["force"] = "N"
 mks_unit_system["magnetic_field_mks"] = "T"
-cgs_unit_system["charge_mks"] = "C"
+mks_unit_system["charge_mks"] = "C"
 
 imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", temperature_unit="R")
 imperial_unit_system["force"] = "lbf"
@@ -128,5 +128,5 @@
 geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom")
 
 planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", temperature_unit="T_pl")
-cgs_unit_system["energy"] = "E_pl"
-cgs_unit_system["charge_cgs"] = "q_pl"
+planck_unit_system["energy"] = "E_pl"
+planck_unit_system["charge_cgs"] = "q_pl"


https://bitbucket.org/yt_analysis/yt/commits/9c6e0d5ed564/
Changeset:   9c6e0d5ed564
Branch:      yt
User:        jzuhone
Date:        2015-12-18 05:51:27+00:00
Summary:     This change allows us to get dimensions not just in the base units
Affected #:  1 file

diff -r 3430f9329ab4fc15debc6237c233235f3d43aab1 -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -85,7 +85,7 @@
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(u.get_base_equivalent(self.ds.unit_system.name))
+                output_units = str(self.ds.unit_system[u.dimensions])
             else:
                 output_units = units
             if (ptype, f) not in self.field_list:
@@ -283,7 +283,7 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(u.get_base_equivalent(self.ds.unit_system.name))
+            units = str(self.ds.unit_system[u.dimensions])
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),


https://bitbucket.org/yt_analysis/yt/commits/9e3ec0f7837c/
Changeset:   9e3ec0f7837c
Branch:      yt
User:        jzuhone
Date:        2015-12-18 05:53:21+00:00
Summary:     Allow the default unit system to be globally configurable
Affected #:  5 files

diff -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,6 +61,7 @@
     ignore_invalid_unit_operation_errors = 'False',
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
+    default_unit_system = 'cgs',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

diff -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -194,7 +194,7 @@
         return obj
 
     def __init__(self, filename, dataset_type=None, file_style=None, 
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -238,7 +238,9 @@
         self._setup_coordinate_handler()
 
         create_code_unit_system(self)
-        if unit_system == "code":
+        if unit_system is None:
+            unit_system = ytcfg.get("yt","default_unit_system")
+        elif unit_system == "code":
             unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 

diff -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -32,6 +32,7 @@
     UnitRegistry, \
     UnitParseError
 from yt.utilities.exceptions import YTUnitsNotReducible
+from yt.config import ytcfg
 
 import copy
 import token
@@ -410,13 +411,15 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def get_base_equivalent(self, unit_system="cgs"):
+    def get_base_equivalent(self, unit_system=None):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
         yt_base_unit_string = self._get_system_unit_string(default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
+        if unit_system is None:
+            unit_system = ytcfg.get("yt", "default_unit_system")
         if unit_system == "cgs":
             return yt_base_unit
         else:

diff -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -61,12 +61,13 @@
         if key not in self.units_map:
             dims = key.expand()
             units = Unit("", registry=self.registry)
-            for factor in dims.as_ordered_factors():
-                dim = list(factor.free_symbols)[0]
-                u = self.units_map[dim]
-                if factor.is_Pow:
-                    u = u ** factor.as_base_exp()[1]
-                units *= u
+            if dims != dimensions.dimensionless:
+                for factor in dims.as_ordered_factors():
+                    dim = list(factor.free_symbols)[0]
+                    u = self.units_map[dim]
+                    if factor.is_Pow:
+                        u = u ** factor.as_base_exp()[1]
+                    units *= u
             self.units_map[key] = units
         return self.units_map[key]
 

diff -r 9c6e0d5ed564ae6a26fadc4eddaa298e8be7cf47 -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -451,7 +451,7 @@
 
         return self
 
-    def convert_to_base(self, unit_system="cgs"):
+    def convert_to_base(self, unit_system=None):
         """
         Convert the array and units to the equivalent base units in
         the specified unit system.
@@ -460,7 +460,8 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units of cgs are used.
+            the default base units are used ("cgs" unless configured
+            differently).
 
         Examples
         --------
@@ -517,7 +518,7 @@
         """
         return self.in_units(units)
 
-    def in_base(self, unit_system="cgs"):
+    def in_base(self, unit_system=None):
         """
         Creates a copy of this array with the data in the specified unit system,
         and returns it in that system's base units.
@@ -526,7 +527,8 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units of cgs are used.
+            the default base units are used ("cgs" unless configured 
+            differently).
 
         Examples
         --------


https://bitbucket.org/yt_analysis/yt/commits/dc0faa60f060/
Changeset:   dc0faa60f060
Branch:      yt
User:        jzuhone
Date:        2015-12-18 05:56:13+00:00
Summary:     Let cosmology use the default unit system by default
Affected #:  1 file

diff -r 9e3ec0f7837c192ea30f20ed3748f6d0d9916b9a -r dc0faa60f06079afd20d0e44630b232d71584da3 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -28,6 +28,8 @@
     gravitational_constant_cgs as G, \
     speed_of_light_cgs
 
+from yt.config import ytcfg
+
 class Cosmology(object):
     r"""
     Create a cosmology calculator to compute cosmological distances and times.
@@ -51,7 +53,7 @@
         Default: 0.0.
     unit_system : UnitSystem, optional 
         The units system to use when making calculations. If not specified,
-        cgs units are assumed.
+        the default unit system is assumed.
 
     Examples
     --------
@@ -66,7 +68,7 @@
                  omega_lambda = 0.73,
                  omega_curvature = 0.0,
                  unit_registry = None,
-                 unit_system = "cgs"):
+                 unit_system = None):
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
@@ -80,6 +82,8 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
+        if unit_system is None:
+            unit_system = ytcfg.get("yt","default_unit_system")
         self.unit_system = unit_system
 
     def hubble_distance(self):


https://bitbucket.org/yt_analysis/yt/commits/40947b40301d/
Changeset:   40947b40301d
Branch:      yt
User:        jzuhone
Date:        2015-12-18 06:02:17+00:00
Summary:     Setting several of the unit_system kwargs to None in the frontends
Affected #:  9 files

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -162,7 +162,7 @@
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
                  file_particle_data=None, file_particle_stars=None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -421,7 +421,7 @@
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
                  file_particle_stars=None, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.over_refine_factor = 1
         self.n_ref = 64
         self.particle_types += ("all",)

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -318,7 +318,7 @@
 
     def __init__(self, filename, dataset_type='artio',
                  storage_filename=None, max_range = 1024,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         from sys import version
         if self._handle is not None:
             return

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -444,7 +444,7 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None, nprocs=1, unit_system="cgs"):
+                 units_override=None, nprocs=1, unit_system=None):
         self.fluid_types += ("athena",)
         self.nprocs = nprocs
         if parameters is None:

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -370,7 +370,7 @@
                  dataset_type='boxlib_native',
                  storage_filename=None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -729,7 +729,7 @@
                  dataset_type='orion_native',
                  storage_filename=None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
 
         BoxlibDataset.__init__(self, output_dir,
                                cparam_filename, fparam_filename,

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -246,7 +246,7 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
         self.dataset_type = dataset_type
@@ -447,7 +447,7 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
 
         ChomboDataset.__init__(self, filename, dataset_type, 
                                storage_filename, ini_filename,

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -666,7 +666,7 @@
                  conversion_override = None,
                  storage_filename = None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         """
         This class is a stripped down class that simply reads and parses
         *filename* without looking at the index.  *dataset_type* gets passed

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -312,7 +312,7 @@
                  suppress_astropy_warnings=True,
                  parameters=None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
 
         if parameters is None:
             parameters = {}

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -183,7 +183,7 @@
                  storage_filename = None,
                  particle_filename = None, 
                  units_override = None,
-                 unit_system = "cgs"):
+                 unit_system = None):
 
         self.fluid_types += ("flash",)
         if self._handle is not None: return

diff -r dc0faa60f06079afd20d0e44630b232d71584da3 -r 40947b40301d276719141944ab1d1fe5c38f9bb5 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -79,7 +79,7 @@
                  field_spec = "default",
                  ptype_spec = "default",
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -344,7 +344,7 @@
                  over_refine_factor=1,
                  bounding_box = None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.storage_filename = None
         filename = os.path.abspath(filename)
         if units_override is not None:


https://bitbucket.org/yt_analysis/yt/commits/014fd9924170/
Changeset:   014fd9924170
Branch:      yt
User:        jzuhone
Date:        2015-12-18 06:08:53+00:00
Summary:     Setting the default unit_system to None in the rest of the frontends
Affected #:  12 files

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -109,7 +109,7 @@
 
     def __init__(self, filename, dataset_type="gadget_fof_hdf5",
                  n_ref=16, over_refine_factor=1,
-                 unit_base=None, units_override=None, unit_system="cgs"):
+                 unit_base=None, units_override=None, unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if unit_base is not None and "UnitLength_in_cm" in unit_base:

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -174,7 +174,7 @@
 
     def __init__(self, filename, dataset_type='grid_data_format',
                  storage_filename=None, geometry=None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         self.geometry = geometry
         self.fluid_types += ("gdf",)
         Dataset.__init__(self, filename, dataset_type,

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -45,7 +45,7 @@
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(HaloCatalogDataset, self).__init__(filename, dataset_type,

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/http_stream/data_structures.py
--- a/yt/frontends/http_stream/data_structures.py
+++ b/yt/frontends/http_stream/data_structures.py
@@ -48,7 +48,7 @@
     def __init__(self, base_url,
                  dataset_type = "http_particle_stream",
                  n_ref = 64, over_refine_factor=1, 
-                 unit_system="cgs"):
+                 unit_system=None):
         if requests is None:
             raise RuntimeError
         self.base_url = base_url

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -66,7 +66,7 @@
 
     def __init__(self, filename, dataset_type='moab_hex8',
                  storage_filename = None, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.fluid_types += ("moab",)
         Dataset.__init__(self, filename, dataset_type,
                          units_override=units_override,
@@ -148,7 +148,7 @@
 
     def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
                  storage_filename = None, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.fluid_types += ("pyne",)
         filename = "pyne_mesh_" + str(id(pyne_mesh))
         self.pyne_mesh = pyne_mesh

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -106,7 +106,7 @@
 
     def __init__(self, filename, dataset_type="subfind_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(OWLSSubfindDataset, self).__init__(filename, dataset_type,

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -520,7 +520,7 @@
     
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -51,7 +51,7 @@
 
     def __init__(self, filename, dataset_type="rockstar_binary",
                  n_ref = 16, over_refine_factor = 1,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(RockstarDataset, self).__init__(filename, dataset_type,

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -76,7 +76,7 @@
                  midx_level = None,
                  field_map = None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -288,7 +288,7 @@
     _dataset_type = 'stream'
 
     def __init__(self, stream_handler, storage_filename=None,
-                 geometry="cartesian", unit_system="cgs"):
+                 geometry="cartesian", unit_system=None):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
@@ -522,7 +522,7 @@
                       nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
                       velocity_unit=None, magnetic_unit=None,
                       periodicity=(True, True, True),
-                      geometry="cartesian", unit_system="cgs"):
+                      geometry="cartesian", unit_system=None):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -710,7 +710,7 @@
                    bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
                    magnetic_unit=None, periodicity=(True, True, True),
-                   geometry="cartesian", refine_by=2, unit_system="cgs"):
+                   geometry="cartesian", refine_by=2, unit_system=None):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -1005,7 +1005,7 @@
                    velocity_unit=None, magnetic_unit=None,
                    periodicity=(True, True, True),
                    n_ref = 64, over_refine_factor = 1, geometry = "cartesian",
-                   unit_system="cgs"):
+                   unit_system=None):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -1233,7 +1233,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(True, True, True),
-                         geometry = "cartesian", unit_system="cgs"):
+                         geometry = "cartesian", unit_system=None):
     r"""Load a hexahedral mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1463,7 +1463,7 @@
                 velocity_unit=None, magnetic_unit=None,
                 periodicity=(True, True, True),
                 over_refine_factor = 1, partial_coverage = 1,
-                unit_system="cgs"):
+                unit_system=None):
     r"""Load an octree mask into yt.
 
     Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1611,7 +1611,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(False, False, False),
-                         geometry = "cartesian", unit_system="cgs"):
+                         geometry = "cartesian", unit_system=None):
     r"""Load an unstructured mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -76,7 +76,7 @@
                  n_ref=64, over_refine_factor=1,
                  bounding_box=None,
                  units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
         # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge

diff -r 40947b40301d276719141944ab1d1fe5c38f9bb5 -r 014fd99241703bd279a2c990840d2d9a6751ec6c yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -159,7 +159,7 @@
 
     def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system="cgs"):
+                 unit_system=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(YTDataContainerDataset, self).__init__(filename, dataset_type,
@@ -344,7 +344,7 @@
     default_fluid_type = "grid"
     fluid_types = ("grid", "gas", "deposit", "index")
 
-    def __init__(self, filename, unit_system="cgs"):
+    def __init__(self, filename, unit_system=None):
         super(YTGridDataset, self).__init__(filename, self._dataset_type,
                                             unit_system=unit_system)
         self.data = self.index.grids[0]
@@ -587,7 +587,7 @@
 
 class YTProfileDataset(YTNonspatialDataset):
     """Dataset for saved profile objects."""
-    def __init__(self, filename, unit_system="cgs"):
+    def __init__(self, filename, unit_system=None):
         super(YTProfileDataset, self).__init__(filename,
                                                unit_system=unit_system)
 


https://bitbucket.org/yt_analysis/yt/commits/3276924eef9c/
Changeset:   3276924eef9c
Branch:      yt
User:        jzuhone
Date:        2015-12-18 15:25:28+00:00
Summary:     Backing out the global configuration changes--this opened a Pandora's box that I'm not ready to resolve
Affected #:  27 files

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,7 +61,6 @@
     ignore_invalid_unit_operation_errors = 'False',
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
-    default_unit_system = 'cgs',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -194,7 +194,7 @@
         return obj
 
     def __init__(self, filename, dataset_type=None, file_style=None, 
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -238,9 +238,7 @@
         self._setup_coordinate_handler()
 
         create_code_unit_system(self)
-        if unit_system is None:
-            unit_system = ytcfg.get("yt","default_unit_system")
-        elif unit_system == "code":
+        if unit_system == "code":
             unit_system = str(self)
         self.unit_system = unit_system_registry[unit_system]
 

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -162,7 +162,7 @@
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
                  file_particle_data=None, file_particle_stars=None,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -421,7 +421,7 @@
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
                  file_particle_stars=None, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.over_refine_factor = 1
         self.n_ref = 64
         self.particle_types += ("all",)

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -318,7 +318,7 @@
 
     def __init__(self, filename, dataset_type='artio',
                  storage_filename=None, max_range = 1024,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         from sys import version
         if self._handle is not None:
             return

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -444,7 +444,7 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None, nprocs=1, unit_system=None):
+                 units_override=None, nprocs=1, unit_system="cgs"):
         self.fluid_types += ("athena",)
         self.nprocs = nprocs
         if parameters is None:

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -370,7 +370,7 @@
                  dataset_type='boxlib_native',
                  storage_filename=None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -729,7 +729,7 @@
                  dataset_type='orion_native',
                  storage_filename=None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
 
         BoxlibDataset.__init__(self, output_dir,
                                cparam_filename, fparam_filename,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -246,7 +246,7 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
         self.dataset_type = dataset_type
@@ -447,7 +447,7 @@
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
 
         ChomboDataset.__init__(self, filename, dataset_type, 
                                storage_filename, ini_filename,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -666,7 +666,7 @@
                  conversion_override = None,
                  storage_filename = None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         """
         This class is a stripped down class that simply reads and parses
         *filename* without looking at the index.  *dataset_type* gets passed

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -312,7 +312,7 @@
                  suppress_astropy_warnings=True,
                  parameters=None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
 
         if parameters is None:
             parameters = {}

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -183,7 +183,7 @@
                  storage_filename = None,
                  particle_filename = None, 
                  units_override = None,
-                 unit_system = None):
+                 unit_system = "cgs"):
 
         self.fluid_types += ("flash",)
         if self._handle is not None: return

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -79,7 +79,7 @@
                  field_spec = "default",
                  ptype_spec = "default",
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -344,7 +344,7 @@
                  over_refine_factor=1,
                  bounding_box = None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.storage_filename = None
         filename = os.path.abspath(filename)
         if units_override is not None:

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -109,7 +109,7 @@
 
     def __init__(self, filename, dataset_type="gadget_fof_hdf5",
                  n_ref=16, over_refine_factor=1,
-                 unit_base=None, units_override=None, unit_system=None):
+                 unit_base=None, units_override=None, unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if unit_base is not None and "UnitLength_in_cm" in unit_base:

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -174,7 +174,7 @@
 
     def __init__(self, filename, dataset_type='grid_data_format',
                  storage_filename=None, geometry=None,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         self.geometry = geometry
         self.fluid_types += ("gdf",)
         Dataset.__init__(self, filename, dataset_type,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -45,7 +45,7 @@
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(HaloCatalogDataset, self).__init__(filename, dataset_type,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/http_stream/data_structures.py
--- a/yt/frontends/http_stream/data_structures.py
+++ b/yt/frontends/http_stream/data_structures.py
@@ -48,7 +48,7 @@
     def __init__(self, base_url,
                  dataset_type = "http_particle_stream",
                  n_ref = 64, over_refine_factor=1, 
-                 unit_system=None):
+                 unit_system="cgs"):
         if requests is None:
             raise RuntimeError
         self.base_url = base_url

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -66,7 +66,7 @@
 
     def __init__(self, filename, dataset_type='moab_hex8',
                  storage_filename = None, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.fluid_types += ("moab",)
         Dataset.__init__(self, filename, dataset_type,
                          units_override=units_override,
@@ -148,7 +148,7 @@
 
     def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
                  storage_filename = None, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.fluid_types += ("pyne",)
         filename = "pyne_mesh_" + str(id(pyne_mesh))
         self.pyne_mesh = pyne_mesh

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -106,7 +106,7 @@
 
     def __init__(self, filename, dataset_type="subfind_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(OWLSSubfindDataset, self).__init__(filename, dataset_type,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -520,7 +520,7 @@
     
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -51,7 +51,7 @@
 
     def __init__(self, filename, dataset_type="rockstar_binary",
                  n_ref = 16, over_refine_factor = 1,
-                 units_override=None, unit_system=None):
+                 units_override=None, unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(RockstarDataset, self).__init__(filename, dataset_type,

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -76,7 +76,7 @@
                  midx_level = None,
                  field_map = None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -288,7 +288,7 @@
     _dataset_type = 'stream'
 
     def __init__(self, stream_handler, storage_filename=None,
-                 geometry="cartesian", unit_system=None):
+                 geometry="cartesian", unit_system="cgs"):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
@@ -522,7 +522,7 @@
                       nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
                       velocity_unit=None, magnetic_unit=None,
                       periodicity=(True, True, True),
-                      geometry="cartesian", unit_system=None):
+                      geometry="cartesian", unit_system="cgs"):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -710,7 +710,7 @@
                    bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
                    magnetic_unit=None, periodicity=(True, True, True),
-                   geometry="cartesian", refine_by=2, unit_system=None):
+                   geometry="cartesian", refine_by=2, unit_system="cgs"):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -1005,7 +1005,7 @@
                    velocity_unit=None, magnetic_unit=None,
                    periodicity=(True, True, True),
                    n_ref = 64, over_refine_factor = 1, geometry = "cartesian",
-                   unit_system=None):
+                   unit_system="cgs"):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -1233,7 +1233,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(True, True, True),
-                         geometry = "cartesian", unit_system=None):
+                         geometry = "cartesian", unit_system="cgs"):
     r"""Load a hexahedral mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1463,7 +1463,7 @@
                 velocity_unit=None, magnetic_unit=None,
                 periodicity=(True, True, True),
                 over_refine_factor = 1, partial_coverage = 1,
-                unit_system=None):
+                unit_system="cgs"):
     r"""Load an octree mask into yt.
 
     Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1611,7 +1611,7 @@
                          mass_unit = None, time_unit = None,
                          velocity_unit = None, magnetic_unit = None,
                          periodicity=(False, False, False),
-                         geometry = "cartesian", unit_system=None):
+                         geometry = "cartesian", unit_system="cgs"):
     r"""Load an unstructured mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -76,7 +76,7 @@
                  n_ref=64, over_refine_factor=1,
                  bounding_box=None,
                  units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
         # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -159,7 +159,7 @@
 
     def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None,
-                 unit_system=None):
+                 unit_system="cgs"):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(YTDataContainerDataset, self).__init__(filename, dataset_type,
@@ -344,7 +344,7 @@
     default_fluid_type = "grid"
     fluid_types = ("grid", "gas", "deposit", "index")
 
-    def __init__(self, filename, unit_system=None):
+    def __init__(self, filename, unit_system="cgs"):
         super(YTGridDataset, self).__init__(filename, self._dataset_type,
                                             unit_system=unit_system)
         self.data = self.index.grids[0]
@@ -587,7 +587,7 @@
 
 class YTProfileDataset(YTNonspatialDataset):
     """Dataset for saved profile objects."""
-    def __init__(self, filename, unit_system=None):
+    def __init__(self, filename, unit_system="cgs"):
         super(YTProfileDataset, self).__init__(filename,
                                                unit_system=unit_system)
 

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -32,7 +32,6 @@
     UnitRegistry, \
     UnitParseError
 from yt.utilities.exceptions import YTUnitsNotReducible
-from yt.config import ytcfg
 
 import copy
 import token
@@ -411,15 +410,13 @@
             units.append("(%s)%s" % (unit_string, power_string))
         return " * ".join(units)
 
-    def get_base_equivalent(self, unit_system=None):
+    def get_base_equivalent(self, unit_system="cgs"):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
         yt_base_unit_string = self._get_system_unit_string(default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
-        if unit_system is None:
-            unit_system = ytcfg.get("yt", "default_unit_system")
         if unit_system == "cgs":
             return yt_base_unit
         else:

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -61,13 +61,12 @@
         if key not in self.units_map:
             dims = key.expand()
             units = Unit("", registry=self.registry)
-            if dims != dimensions.dimensionless:
-                for factor in dims.as_ordered_factors():
-                    dim = list(factor.free_symbols)[0]
-                    u = self.units_map[dim]
-                    if factor.is_Pow:
-                        u = u ** factor.as_base_exp()[1]
-                    units *= u
+            for factor in dims.as_ordered_factors():
+                dim = list(factor.free_symbols)[0]
+                u = self.units_map[dim]
+                if factor.is_Pow:
+                    u = u ** factor.as_base_exp()[1]
+                units *= u
             self.units_map[key] = units
         return self.units_map[key]
 

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -451,7 +451,7 @@
 
         return self
 
-    def convert_to_base(self, unit_system=None):
+    def convert_to_base(self, unit_system="cgs"):
         """
         Convert the array and units to the equivalent base units in
         the specified unit system.
@@ -460,8 +460,7 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units are used ("cgs" unless configured
-            differently).
+            the default base units of cgs are used.
 
         Examples
         --------
@@ -518,7 +517,7 @@
         """
         return self.in_units(units)
 
-    def in_base(self, unit_system=None):
+    def in_base(self, unit_system="cgs"):
         """
         Creates a copy of this array with the data in the specified unit system,
         and returns it in that system's base units.
@@ -527,8 +526,7 @@
         ----------
         unit_system : string, optional
             The unit system to be used in the conversion. If not specified,
-            the default base units are used ("cgs" unless configured 
-            differently).
+            the default base units of cgs are used.
 
         Examples
         --------

diff -r 014fd99241703bd279a2c990840d2d9a6751ec6c -r 3276924eef9c8af8b1e342ace13c76073d750e6a yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -28,8 +28,6 @@
     gravitational_constant_cgs as G, \
     speed_of_light_cgs
 
-from yt.config import ytcfg
-
 class Cosmology(object):
     r"""
     Create a cosmology calculator to compute cosmological distances and times.
@@ -53,7 +51,7 @@
         Default: 0.0.
     unit_system : UnitSystem, optional 
         The units system to use when making calculations. If not specified,
-        the default unit system is assumed.
+        cgs units are assumed.
 
     Examples
     --------
@@ -68,7 +66,7 @@
                  omega_lambda = 0.73,
                  omega_curvature = 0.0,
                  unit_registry = None,
-                 unit_system = None):
+                 unit_system = "cgs"):
         self.omega_matter = omega_matter
         self.omega_lambda = omega_lambda
         self.omega_curvature = omega_curvature
@@ -82,8 +80,6 @@
                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.unit_registry = unit_registry
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
-        if unit_system is None:
-            unit_system = ytcfg.get("yt","default_unit_system")
         self.unit_system = unit_system
 
     def hubble_distance(self):


https://bitbucket.org/yt_analysis/yt/commits/90857ce3ab17/
Changeset:   90857ce3ab17
Branch:      yt
User:        jzuhone
Date:        2015-12-18 15:29:54+00:00
Summary:     Preventing code duplication
Affected #:  2 files

diff -r 3276924eef9c8af8b1e342ace13c76073d750e6a -r 90857ce3ab1733adf476d02221aa9226e02c5e3b yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -393,28 +393,11 @@
                 return False
         return True
 
-    def _get_system_unit_string(self, base_units):
-        # The dimensions of a unit object is the product of the base dimensions.
-        # Use sympy to factor the dimensions into base CGS unit symbols.
-        units = []
-        my_dims = self.dimensions.expand()
-        if my_dims is dimensionless:
-            return ""
-        for factor in my_dims.as_ordered_factors():
-            dim = list(factor.free_symbols)[0]
-            unit_string = str(base_units[dim])
-            if factor.is_Pow:
-                power_string = "**(%s)" % factor.as_base_exp()[1]
-            else:
-                power_string = ""
-            units.append("(%s)%s" % (unit_string, power_string))
-        return " * ".join(units)
-
     def get_base_equivalent(self, unit_system="cgs"):
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
-        yt_base_unit_string = self._get_system_unit_string(default_base_units)
+        yt_base_unit_string = get_system_unit_string(self.dimensions, default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
         if unit_system == "cgs":
@@ -424,7 +407,7 @@
                 raise RuntimeError("To convert to a \"code\" unit system, "
                                    "specify a dataset with \"unit_system\" == ds!")
             unit_system = unit_system_registry[str(unit_system)]
-            units_string = self._get_system_unit_string(unit_system.base_units)
+            units_string = get_system_unit_string(self.dimensions, unit_system.base_units)
             u = Unit(units_string, registry=self.registry)
             base_value = get_conversion_factor(self, yt_base_unit)[0]
             base_value /= get_conversion_factor(self, u)[0]
@@ -621,3 +604,20 @@
                                  "allowed.  Got dimensions '%s'" % dimensions)
     elif not isinstance(dimensions, Basic):
         raise UnitParseError("Bad dimensionality expression '%s'." % dimensions)
+
+def get_system_unit_string(dimensions, base_units):
+    # The dimensions of a unit object is the product of the base dimensions.
+    # Use sympy to factor the dimensions into base CGS unit symbols.
+    units = []
+    my_dims = dimensions.expand()
+    if my_dims is dimensionless:
+        return ""
+    for factor in my_dims.as_ordered_factors():
+        dim = list(factor.free_symbols)[0]
+        unit_string = str(base_units[dim])
+        if factor.is_Pow:
+            power_string = "**(%s)" % factor.as_base_exp()[1]
+        else:
+            power_string = ""
+        units.append("(%s)%s" % (unit_string, power_string))
+    return " * ".join(units)

diff -r 3276924eef9c8af8b1e342ace13c76073d750e6a -r 90857ce3ab1733adf476d02221aa9226e02c5e3b yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -13,7 +13,7 @@
 
 from yt.extern.six import string_types
 from yt.units import dimensions
-from yt.units.unit_object import Unit, unit_system_registry
+from yt.units.unit_object import Unit, unit_system_registry, get_system_unit_string
 from yt.utilities import physical_constants as pc
 
 class UnitSystemConstants(object):
@@ -59,15 +59,8 @@
                 self._dims.append(key)
             key = getattr(dimensions, key)
         if key not in self.units_map:
-            dims = key.expand()
-            units = Unit("", registry=self.registry)
-            for factor in dims.as_ordered_factors():
-                dim = list(factor.free_symbols)[0]
-                u = self.units_map[dim]
-                if factor.is_Pow:
-                    u = u ** factor.as_base_exp()[1]
-                units *= u
-            self.units_map[key] = units
+            units = get_system_unit_string(key, self.units_map)
+            self.units_map[key] = Unit(units, registry=self.registry)
         return self.units_map[key]
 
     def __setitem__(self, key, value):


https://bitbucket.org/yt_analysis/yt/commits/e7a0f1d53a43/
Changeset:   e7a0f1d53a43
Branch:      yt
User:        jzuhone
Date:        2015-12-18 16:44:32+00:00
Summary:     Catch this here instead
Affected #:  1 file

diff -r 90857ce3ab1733adf476d02221aa9226e02c5e3b -r e7a0f1d53a439173b72333ae35092638a0e12558 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -401,6 +401,8 @@
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
         if unit_system == "cgs":
+            if current_mks in self.dimensions.free_symbols:
+                raise YTUnitsNotReducible(self, "cgs")
             return yt_base_unit
         else:
             if unit_system == "code":
@@ -418,8 +420,6 @@
         """
         Create and return dimensionally-equivalent cgs units.
         """
-        if current_mks in self.dimensions.free_symbols:
-            raise YTUnitsNotReducible(self, "cgs")
         return self.get_base_equivalent(unit_system="cgs")
 
     def get_mks_equivalent(self):


https://bitbucket.org/yt_analysis/yt/commits/fdf87b9e254b/
Changeset:   fdf87b9e254b
Branch:      yt
User:        jzuhone
Date:        2015-12-18 17:03:44+00:00
Summary:     Better error message
Affected #:  1 file

diff -r e7a0f1d53a439173b72333ae35092638a0e12558 -r fdf87b9e254b64e4020a24f3273a8382c3558f57 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -406,8 +406,9 @@
             return yt_base_unit
         else:
             if unit_system == "code":
-                raise RuntimeError("To convert to a \"code\" unit system, "
-                                   "specify a dataset with \"unit_system\" == ds!")
+                raise RuntimeError(r'You must refer to a dataset instance to convert to a '
+                                   r'code unit system. Try again with unit_system=ds instead, '
+                                   r'where \'ds\' is your dataset.')
             unit_system = unit_system_registry[str(unit_system)]
             units_string = get_system_unit_string(self.dimensions, unit_system.base_units)
             u = Unit(units_string, registry=self.registry)


https://bitbucket.org/yt_analysis/yt/commits/eef7a06b34be/
Changeset:   eef7a06b34be
Branch:      yt
User:        jzuhone
Date:        2015-12-18 17:06:44+00:00
Summary:     Merging
Affected #:  89 files

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -57,6 +57,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/element_mappings.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_intersection.cpp
 syntax: glob
 *.pyc
 .*.swp

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -327,6 +327,100 @@
 
 .. _loading-fits-data:
 
+Exodus II Data
+--------------
+
+Exodus II is a file format for Finite Element datasets that is used by the MOOSE
+framework for file IO. Support for this format (and for unstructured mesh data in 
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
+there to be some buggy features at present. Currently, yt can visualize first-order
+mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
+Development of higher-order visualization capability is a work in progress.
+
+To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
+file:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+
+Because Exodus II datasets can have multiple steps (which can correspond to time steps, 
+picard iterations, non-linear solve iterations, etc...), you can also specify a step
+argument when you load an Exodus II data that defines the index at which to look when
+you read data from the file.
+
+You can access the connectivity information directly by doing:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.index.meshes[0].connectivity_coords)
+   print(ds.index.meshes[0].connectivity_indices)
+   print(ds.index.meshes[1].connectivity_coords)
+   print(ds.index.meshes[1].connectivity_indices)
+
+This particular dataset has two meshes in it, both of which are made of 8-node hexes.
+yt uses a field name convention to access these different meshes in plots and data
+objects. To see all the fields found in a particular dataset, you can do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.field_list)
+
+This will give you a list of field names like ``('connect1', 'diffused')`` and 
+``('connect2', 'convected')``. Here, fields labelled with ``'connect1'`` correspond to the
+first mesh, and those with ``'connect2'`` to the second, and so on. To grab the value
+of the ``'convected'`` variable at all the nodes in the first mesh, for example, you
+would do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()  # geometric selection, this just grabs everything
+   print(ad['connect1', 'convected'])
+
+In this dataset, ``('connect1', 'convected')`` is a nodal field, meaning that the field values
+are defined at the vertices of the elements. If we examine the shape of the returned array:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'convected'].shape)
+
+we see that this mesh has 12480 8-node hexahedral elements, and that we get 8 field values
+for each element. To get the vertex positions at which these field values are defined, we
+can do, for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'vertex_x'])
+
+If we instead look at an element-centered field, like ``('connect1', 'conv_indicator')``,
+we get:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'conv_indicator'].shape)
+
+we instead get only one field value per element.
+
+For information about visualizing unstructured mesh data, including Exodus II datasets, 
+please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
+
+
 FITS Data
 ---------
 
@@ -1035,8 +1129,8 @@
 
 In addition to the above grid types, you can also load data stored on
 unstructured meshes. This type of mesh is used, for example, in many
-finite element calculations. Currently, hexahedral, tetrahedral, and
-wedge-shaped mesh element are supported.
+finite element calculations. Currently, hexahedral and tetrahedral
+mesh elements are supported.
 
 To load an unstructured mesh, you need to specify the following. First,
 you need to have a coordinates array, which should be an (L, 3) array

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -22,7 +22,7 @@
 * Late-stage beta support for Python 3 - unit tests and answer tests pass for 
   all the major frontends under python 3.4, and yt should now be mostly if not 
   fully usable.  Because many of the yt developers are still on Python 2 at 
-  this point, this should be considered a “late stage beta” as there may be 
+  this point, this should be considered a "late stage beta" as there may be 
   remaining issues yet to be identified or worked out.
 * Now supporting Gadget Friend-of-Friends/Subfind catalogs - see here to learn 
   how to load halo catalogs as regular yt datasets.

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -355,6 +355,78 @@
 keyword arguments, as described in
 :class:`~yt.visualization.plot_window.OffAxisProjectionPlot`
 
+.. _unstructured-mesh-slices:
+
+Unstructured Mesh Slices
+------------------------
+
+Unstructured Mesh datasets can be sliced using the same syntax as above.
+Here is an example script using a publicly available MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'x', ('connect1', 'diffused'))
+   sl.zoom(0.75)
+   sl.save()
+
+Here, we plot the ``'diffused'`` variable, using a slice normal to the ``'x'`` direction, 
+through the mesh labelled by ``'connect1'``. By default, the slice goes through the
+center of the domain. We have also zoomed out a bit to get a better view of the 
+resulting structure. To instead plot the ``'convected'`` variable, using a slice normal
+to the ``'z'`` direction through the mesh labelled by ``'connect2'``, we do:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'z', ('connect2', 'convected'))
+   sl.zoom(0.75)
+   sl.save()
+
+These slices are made by sampling the finite element solution at the points corresponding 
+to each pixel of the image. The ``'convected'`` and ``'diffused'`` variables are node-centered,
+so this interpolation is performed by converting the sample point to the reference coordinate
+system of the element and evaluating the appropriate shape functions. You can also
+plot element-centered fields:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'y', ('connect1', 'conv_indicator'))
+   sl.zoom(0.75)
+   sl.save()
+
+We can also annotate the mesh lines, as follows:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
+   sl.annotate_mesh_lines(thresh=0.1)
+   sl.zoom(0.75)
+   sl.save()
+
+This annotation is performed by marking the pixels where the mapped coordinate is close
+to the element boundary. What counts as 'close' (in the mapped coordinate system) is 
+determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
+thinner.
+
+Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
+slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
+an example using another MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e')
+   sl = yt.SlicePlot(ds, 2, ('connect1', 'nodal_aux'))
+   sl.save()
+
+
 Plot Customization: Recentering, Resizing, Colormaps, and More
 --------------------------------------------------------------
 

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,19 +3,16 @@
 Unstructured Mesh Rendering
 ===========================
 
+Installation
+^^^^^^^^^^^^
+
 Beginning with version 3.3, yt has the ability to volume render unstructured
-meshes from, for example, finite element calculations. In order to use this
-capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`_
+mesh data - like that created by finite element calculations, for example. 
+In order to use this capability, a few additional dependencies are required 
+beyond those you get when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
-Embree is installed, you must also create a symlink next to the library. For
-example, if the libraries were installed at /usr/local/lib/, you must do
-
-.. code-block:: bash
-
-    sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. 
 
 Second, the python bindings for embree (called 
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
@@ -25,23 +22,39 @@
 
     git clone https://github.com/scopatz/pyembree
 
-To install, navigate to the root directory and run the setup script:
+To install, navigate to the root directory and run the setup script.
+If Embree was installed to some location that is not in your path by default,
+you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
+the Mac OS X package installer puts the installation at /opt/local/ instead of 
+/usr/local. To account for this, you would do:
 
 .. code-block:: bash
 
-    python setup.py develop
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-If Embree was installed to some location that is not in your path by default,
-you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
-the Mac OS package installer puts the installation at /opt/local/ instead of 
-usr/local. To account for this, you would do:
+Once embree and pyembree are installed, you must rebuild yt from source in order to use
+the unstructured mesh rendering capability. Once again, if embree is installed in a 
+location that is not part of your default search path, you must tell yt where to find it.
+There are a number of ways to do this. One way is to again manually pass in the flags
+when running the setup script in the yt-hg directory:
 
 .. code-block:: bash
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py develop
 
-You must also use these flags when building any part of yt that links against
-pyembree.
+You can also set EMBREE_DIR environment variable to '/opt/local', in which case
+you could just run 
+
+.. code-block:: bash
+   
+   python setup.py develop
+
+as usual. Finally, if you create a file called embree.cfg in the yt-hg directory with
+the location of the embree installation, the setup script will find this and use it, 
+provided EMBREE_DIR is not set. We recommend one of the latter two methods, especially
+if you plan on re-compiling the cython extensions regularly. Note that none of this is
+necessary if you installed embree into a location that is in your default path, such
+as /usr/local.
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
 much like any other dataset. In particular, a new type of 
@@ -55,120 +68,293 @@
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
 a set of rays are cast at the source. Each time a ray strikes the source mesh,
 the data is sampled at the intersection point and the resulting value gets 
-saved into an image.
+saved into an image. See below for examples.
 
-See below for examples. First, here is an example of rendering a hexahedral mesh.
+Examples
+^^^^^^^^
+
+First, here is an example of rendering an 8-node, hexahedral MOOSE dataset.
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.resolution = (800, 800)
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
 
-   # make the image
-   im = ms.render(cam)
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0, vmax=2.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('hex_mesh_render.png')
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
 
-Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+You can also overplot the mesh boundaries:
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect1', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
+
+   ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_render_with_mesh.png')
+
+As with slices, you can visualize different meshes and different fields. For example,
+here is a script similar to the above that plots the "diffused" variable 
+using the mesh labelled by "connect2":
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
+
+Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+Note that in this dataset, there are multiple "steps" per file, so we specify
+that we want to look at the last one.
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
    filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
-   coords, connectivity, data = get_data(filename)
-   mesh_id = 0
-   field_name = ('gas', 'u')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load(filename, step=-1)  # we look at the last time frame
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'u'))
 
-   # set up camera
+   # setup the camera 
    cam = Camera(ds)
    camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
    cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
    north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.set_position(camera_position, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 1.0))
+   pw.write_png(im, 'tetra_render.png')
+
+Another example, this time plotting the temperature field from a 20-node hex 
+MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)  # we load the last time frame
+
+   ms = MeshSource(ds, ('connect2', 'temp'))
+
+   # set up the camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+   cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
    cam.resolution = (800, 800)
    cam.set_position(camera_position, north_vector)
 
-   # make the image
-   im = ms.render(cam)
+   im = ms.render(cam, cmap='hot', color_bounds=(500.0, 1700.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex20_render.png')
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0.0, vmax=1.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('tet_mesh_render.png')
+As with other volume renderings in yt, you can swap out different lenses. Here is 
+an example that uses a "perspective" lens, for which the rays diverge from the 
+camera position according to some opening angle:
 
-Finally, here is a script that creates frames of a movie. It calls the rotate()
-method 300 times, saving a new image to the disk each time.
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds, lens_type='perspective')
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_mesh_render_perspective.png')
+
+You can also create scenes that have multiple meshes. The ray-tracing infrastructure
+will keep track of the depth information for each source separately, and composite
+the final image accordingly. In the next example, we show how to render a scene 
+with two meshes on it:
+
+.. python-script::
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    import yt.utilities.png_writer as pw
+
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+    # this time we create an empty scene and add sources to it one-by-one
+    sc = Scene()
+
+    cam = Camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+
+    sc.camera = cam
+
+    # create two distinct MeshSources from 'connect1' and 'connect2'
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+
+    pw.write_png(im, 'composite_render.png')
+
+
+Making Movies
+^^^^^^^^^^^^^
+
+Here are a couple of example scripts that show how to create image frames that 
+can later be stitched together into a movie. In the first example, we look at a 
+single dataset at a fixed time, but we move the camera around to get a different
+vantage point. We call the rotate() method 301 times, saving a new image to the 
+disk each time.
 
 .. code-block:: python
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load dataset
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
    cam.steady_north = True
 
    # make movie frames
    num_frames = 301
    for i in range(num_frames):
        cam.rotate(2.0*np.pi/num_frames)
-       im = ms.render(cam)
-       plt.imshow(im, cmap='Eos A', origin='lower',vmin=0.0, vmax=2.0)
-       plt.gca().axes.get_xaxis().set_visible(False)
-       plt.gca().axes.get_yaxis().set_visible(False)
-       cb = plt.colorbar()
-       cb.set_label('diffused')
-       plt.savefig('movie_frames/surface_render_%.4d.png' % i)
-       plt.clf()
+       im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+       pw.write_png(im, 'movie_frames/surface_render_%.4d.png' % i)
+
+Finally, this example demonstrates how to loop over the time steps in a single
+file with a fixed camera position:
+
+.. code-block:: python
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    import pylab as plt
+
+    NUM_STEPS = 127
+    CMAP = 'hot'
+    VMIN = 300.0
+    VMAX = 2000.0
+
+    for step in range(NUM_STEPS):
+
+        ds = yt.load("MOOSE_sample_data/mps_out.e", step=step)
+
+        time = ds._get_current_time()
+
+        # the field name is a tuple of strings. The first string
+        # specifies which mesh will be plotted, the second string
+        # specifies the name of the field.
+        field_name = ('connect2', 'temp')
+
+        # this initializes the render source
+        ms = MeshSource(ds, field_name)
+
+        # set up the camera here. these values were arrived at by
+        # calling pitch, yaw, and roll in the notebook until I
+        # got the angle I wanted.
+        cam = Camera(ds)
+        camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
+        cam.focus = ds.domain_center
+        north_vector = ds.arr([0.3032476, 0.71782557, -0.62671153], 'dimensionless')
+        cam.width = ds.arr([ 0.04,  0.04,  0.04], 'code_length')
+        cam.resolution = (800, 800)
+        cam.set_position(camera_position, north_vector)
+
+        # actually make the image here
+        im = ms.render(cam, cmap=CMAP, color_bounds=(VMIN, VMAX))
+
+        # Plot the result using matplotlib and save.
+        # Note that we are setting the upper and lower
+        # bounds of the colorbar to be the same for all
+        # frames of the image.
+
+        # must clear the image between frames
+        plt.clf()
+        fig = plt.gcf()
+        ax = plt.gca()
+        ax.imshow(im, interpolation='nearest', origin='lower')
+
+        # Add the colorbar using a fake (not shown) image.
+        p = ax.imshow(ms.data, visible=False, cmap=CMAP, vmin=VMIN, vmax=VMAX)
+        cb = fig.colorbar(p)
+        cb.set_label(field_name[1])
+
+        ax.text(25, 750, 'time = %.2e' % time, color='k')
+        ax.axes.get_xaxis().set_visible(False)
+        ax.axes.get_yaxis().set_visible(False)
+
+        plt.savefig('movie_frames/test_%.3d' % step)

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,4 @@
 h5py==2.5.0 
 nose==1.3.6 
 sympy==0.7.6 
+

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 tests/nose_runner.py
--- /dev/null
+++ b/tests/nose_runner.py
@@ -0,0 +1,54 @@
+import sys
+import os
+import yaml
+import multiprocessing as mp
+import nose
+import glob
+from contextlib import closing
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import AnswerTesting
+
+
+def run_job(argv):
+    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
+        cur_stderr = sys.stderr
+        sys.stderr = fstderr
+        answer = argv[0]
+        test_dir = ytcfg.get("yt", "test_data_dir")
+        answers_dir = os.path.join(test_dir, "answers")
+        if not os.path.isdir(os.path.join(answers_dir, answer)):
+            nose.run(argv=argv + ['--answer-store'],
+                     addplugins=[AnswerTesting()], exit=False)
+        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+    sys.stderr = cur_stderr
+
+if __name__ == "__main__":
+    test_dir = ytcfg.get("yt", "test_data_dir")
+    answers_dir = os.path.join(test_dir, "answers")
+    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+        tests = yaml.load(obj)
+
+    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+                 '--with-answer-testing', '--answer-big-data', '--local']
+    args = [['unittests', '-v', '-s', '--nologcapture']]
+    for answer in list(tests.keys()):
+        argv = [answer]
+        argv += base_argv
+        argv.append('--xunit-file=%s.xml' % answer)
+        argv.append('--answer-name=%s' % answer)
+        argv += tests[answer]
+        args.append(argv)
+    
+    processes = [mp.Process(target=run_job, args=(args[i],))
+                 for i in range(len(args))]
+    for p in processes:
+        p.start()
+    for p in processes:
+        p.join(timeout=7200)
+        if p.is_alive():
+            p.terminate()
+            p.join(timeout=30)
+    for fname in glob.glob("*.out"):
+        with open(fname, 'r') as fin:
+            print(fin.read())
+        os.remove(fname)

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 tests/tests_2.7.yaml
--- /dev/null
+++ b/tests/tests_2.7.yaml
@@ -0,0 +1,51 @@
+local_artio_270:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_270:
+  - yt/frontends/athena
+
+local_chombo_270:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_270:
+  - yt/frontends/enzo
+
+local_fits_270:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_270:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_270:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_270:
+  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_270:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_270:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_270:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_270:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_270:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_270:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_270:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 tests/tests_3.4.yaml
--- /dev/null
+++ b/tests/tests_3.4.yaml
@@ -0,0 +1,49 @@
+local_artio_340:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_340:
+  - yt/frontends/athena
+
+local_chombo_340:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_340:
+  - yt/frontends/enzo
+
+local_fits_340:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_340:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_340:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_340:
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_340:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_340:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_340:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_340:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_340:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_340:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_340:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -47,6 +47,12 @@
                "y":("z","x"),
                "z":("x","y")}
 
+def force_unicode(value):
+    if hasattr(value, 'decode'):
+        return value.decode('utf8')
+    else:
+        return value
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -919,27 +925,28 @@
 
         p = f["/parameters"]
         parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
-        if isinstance(p["area"].value, (string_types, bytes)):
-            parameters["Area"] = p["area"].value.decode("utf8")
+        area = force_unicode(p['area'].value)
+        if isinstance(area, string_types):
+            parameters["Area"] = area
         else:
-            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+            parameters["Area"] = YTQuantity(area, "cm**2")
         parameters["Redshift"] = p["redshift"].value
         parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
         parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
         parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
         parameters["pix_center"] = p["pix_center"][:]
         if "rmf" in p:
-            parameters["RMF"] = p["rmf"].value.decode("utf8")
+            parameters["RMF"] = force_unicode(p["rmf"].value)
         if "arf" in p:
-            parameters["ARF"] = p["arf"].value.decode("utf8")
+            parameters["ARF"] = force_unicode(p["arf"].value)
         if "channel_type" in p:
-            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+            parameters["ChannelType"] = force_unicode(p["channel_type"].value)
         if "mission" in p:
-            parameters["Mission"] = p["mission"].value.decode("utf8")
+            parameters["Mission"] = force_unicode(p["mission"].value)
         if "telescope" in p:
-            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+            parameters["Telescope"] = force_unicode(p["telescope"].value)
         if "instrument" in p:
-            parameters["Instrument"] = p["instrument"].value.decode("utf8")
+            parameters["Instrument"] = force_unicode(p["instrument"].value)
 
         d = f["/data"]
         events["xpix"] = d["xpix"][:]
@@ -1552,4 +1559,4 @@
             d.create_dataset(key, data=f_in[key].value)
 
     f_in.close()
-    f_out.close()
\ No newline at end of file
+    f_out.close()

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -588,7 +588,7 @@
                         extra_attrs=extra_attrs)
 
         return filename
-        
+
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -59,6 +59,9 @@
         self._current_fluid_type = self.ds.default_fluid_type
 
     def _check_consistency(self):
+        if self.connectivity_indices.shape[1] != self._connectivity_length:
+            raise RuntimeError
+
         for gi in range(self.connectivity_indices.shape[0]):
             ind = self.connectivity_indices[gi, :] - self._index_offset
             coords = self.connectivity_coords[ind, :]
@@ -136,7 +139,7 @@
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
         if count == 0: return 0
-        dest[offset:offset+count] = source[mask,...]
+        dest[offset:offset+count] = source[mask, ...]
         return count
 
     def count(self, selector):
@@ -167,11 +170,12 @@
 
     def select_fcoords_vertex(self, dobj = None):
         mask = self._get_selector_mask(dobj.selector)
-        if mask is None: return np.empty((0,self._connectivity_length,3), dtype='float64')
+        if mask is None: return np.empty((0, self._connectivity_length, 3), dtype='float64')
         vertices = self.connectivity_coords[
                 self.connectivity_indices - 1]
         return vertices[mask, :, :]
 
+
 class SemiStructuredMesh(UnstructuredMesh):
     _connectivity_length = 8
     _type_name = 'semi_structured_mesh'

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -24,6 +24,7 @@
     'chombo',
     'eagle',
     'enzo',
+    'exodus_ii',
     'fits',
     'flash',
     'gadget',

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/__init__.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.exodus_ii
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/api.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/api.py
@@ -0,0 +1,27 @@
+"""
+API for yt.frontends.exodus_ii
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    ExodusIIUnstructuredMesh, \
+    ExodusIIUnstructuredIndex, \
+    ExodusIIDataset
+
+from .fields import \
+    ExodusIIFieldInfo
+
+from .io import \
+    IOHandlerExodusII
+
+from . import tests

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/data_structures.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -0,0 +1,262 @@
+"""
+Exodus II data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import numpy as np
+
+from yt.geometry.unstructured_mesh_handler import \
+    UnstructuredIndex
+from yt.data_objects.unstructured_mesh import \
+    UnstructuredMesh
+from yt.data_objects.static_output import \
+    Dataset
+from .io import \
+    NetCDF4FileHandler
+from yt.utilities.logger import ytLogger as mylog
+from .fields import \
+    ExodusIIFieldInfo
+from .util import \
+    load_info_records, sanitize_string
+
+
+class ExodusIIUnstructuredMesh(UnstructuredMesh):
+    _index_offset = 1
+
+    def __init__(self, *args, **kwargs):
+        super(ExodusIIUnstructuredMesh, self).__init__(*args, **kwargs)
+
+
+class ExodusIIUnstructuredIndex(UnstructuredIndex):
+    def __init__(self, ds, dataset_type = 'exodus_ii'):
+        super(ExodusIIUnstructuredIndex, self).__init__(ds, dataset_type)
+
+    def _initialize_mesh(self):
+        coords = self.ds._read_coordinates()
+        self.meshes = [ExodusIIUnstructuredMesh(
+            mesh_id, self.index_filename, conn_ind, coords, self)
+                       for mesh_id, conn_ind in
+                       enumerate(self.ds._read_connectivity())]
+
+    def _detect_output_fields(self):
+        elem_names = self.dataset.parameters['elem_names']
+        node_names = self.dataset.parameters['nod_names']
+        fnames = elem_names + node_names
+        self.field_list = []
+        for i in range(1, len(self.meshes)+1):
+            self.field_list += [('connect%d' % i, fname) for fname in fnames]
+
+
+class ExodusIIDataset(Dataset):
+    _index_class = ExodusIIUnstructuredIndex
+    _field_info_class = ExodusIIFieldInfo
+
+    def __init__(self,
+                 filename,
+                 step=0,
+                 dataset_type='exodus_ii',
+                 storage_filename=None,
+                 units_override=None):
+
+        self.parameter_filename = filename
+        self.fluid_types += self._get_fluid_types()
+        self.step = step
+        super(ExodusIIDataset, self).__init__(filename, dataset_type,
+                                              units_override=units_override)
+        self.index_filename = filename
+        self.storage_filename = storage_filename
+
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
+
+    def _parse_parameter_file(self):
+        self._handle = NetCDF4FileHandler(self.parameter_filename)
+        self._vars = self._handle.dataset.variables
+        self._read_glo_var()
+        self.dimensionality = self._vars['coor_names'].shape[0]
+        self.parameters['info_records'] = self._load_info_records()
+        self.unique_identifier = self._get_unique_identifier()
+        self.num_steps = len(self._vars['time_whole'])
+        self.current_time = self._get_current_time()
+        self.parameters['num_meshes'] = self._vars['eb_status'].shape[0]
+        self.parameters['elem_names'] = self._get_elem_names()
+        self.parameters['nod_names'] = self._get_nod_names()
+        self.domain_left_edge = self._load_domain_edge(0)
+        self.domain_right_edge = self._load_domain_edge(1)
+
+        # set up pseudo-3D for low-dimensional datasets here
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.append(self.domain_left_edge, 0.0)
+            self.domain_right_edge = np.append(self.domain_right_edge, 1.0)
+
+        self.periodicity = (False, False, False)
+
+        # These attributes don't really make sense for unstructured
+        # mesh data, but yt warns if they are not present, so we set
+        # them to dummy values here.
+        self.domain_dimensions = np.ones(3, "int32")
+        self.cosmological_simulation = 0
+        self.current_redshift = 0
+        self.omega_lambda = 0
+        self.omega_matter = 0
+        self.hubble_constant = 0
+        self.refine_by = 0
+
+    def _get_fluid_types(self):
+        handle = NetCDF4FileHandler(self.parameter_filename).dataset
+        fluid_types = ()
+        i = 1
+        while True:
+            ftype = 'connect%d' % i
+            if ftype in handle.variables:
+                fluid_types += (ftype,)
+                i += 1
+            else:
+                break
+        return fluid_types
+
+    def _read_glo_var(self):
+        """
+        Adds each global variable to the dict of parameters
+
+        """
+        names = self._get_glo_names()
+        if not names:
+            return
+        values = self._vars['vals_glo_var'][:].transpose()
+        for name, value in zip(names, values):
+            self.parameters[name] = value
+
+    def _load_info_records(self):
+        """
+        Returns parsed version of the info_records.
+        """
+        try:
+            return load_info_records(self._vars['info_records'])
+        except (KeyError, TypeError):
+            mylog.warning("No info_records found")
+            return []
+
+    def _get_unique_identifier(self):
+        return self.parameter_filename
+
+    def _get_current_time(self):
+        try:
+            return self._vars['time_whole'][self.step]
+        except IndexError:
+            raise RuntimeError("Invalid step number, max is %d" \
+                               % (self.num_steps - 1))            
+        except (KeyError, TypeError):
+            return 0.0
+
+    def _get_glo_names(self):
+        """
+
+        Returns the names of the global vars, if available.
+
+        """
+
+        if "name_glo_var" not in self._vars:
+            mylog.warning("name_glo_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_glo_var"]]
+            
+    def _get_elem_names(self):
+        """
+
+        Returns the names of the element vars, if available.
+
+        """
+
+        if "name_elem_var" not in self._vars:
+            mylog.warning("name_elem_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_elem_var"]]
+
+    def _get_nod_names(self):
+        """
+
+        Returns the names of the node vars, if available
+
+        """
+
+        if "name_nod_var" not in self._vars:
+            mylog.warning("name_nod_var not found")
+            return []
+        else:
+            return [sanitize_string(v.tostring()) for v in
+                    self._vars["name_nod_var"]]
+
+    def _read_coordinates(self):
+        """
+
+        Loads the coordinates for the mesh
+
+        """
+        
+        coord_axes = 'xyz'[:self.dimensionality]
+
+        mylog.info("Loading coordinates")
+        if "coord" not in self._vars:
+            return np.array([self._vars["coord%s" % ax][:]
+                             for ax in coord_axes]).transpose().copy()
+        else:
+            return np.array([coord for coord in
+                             self._vars["coord"][:]]).transpose().copy()
+
+    def _read_connectivity(self):
+        """
+        Loads the connectivity data for the mesh
+        """
+        mylog.info("Loading connectivity")
+        connectivity = []
+        for i in range(self.parameters['num_meshes']):
+            connectivity.append(self._vars["connect%d" % (i+1)][:].astype("i8"))
+        return connectivity
+
+    def _load_domain_edge(self, domain_idx):
+        """
+        Loads the boundaries for the domain edge
+
+        Parameters:
+        - domain_idx: 0 corresponds to the left edge, 1 corresponds to the right edge
+        """
+        if domain_idx == 0:
+            return self._read_coordinates().min(axis=0)
+        if domain_idx == 1:
+            return self._read_coordinates().max(axis=0)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            from netCDF4 import Dataset
+            filename = args[0]
+            with Dataset(filename) as f:
+                f.variables['connect1']
+            return True
+        except:
+            pass

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/definitions.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/fields.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/fields.py
@@ -0,0 +1,45 @@
+"""
+ExodusII-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  There are
+# optionally methods on it that get called which can be subclassed.
+
+class ExodusIIFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
+
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ExodusIIFieldInfo, self).__init__(ds, field_list)
+        # If you want, you can check self.field_list
+
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the dataset.
+        # You can use self.alias, self.add_output_field and self.add_field .
+        pass
+
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/io.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/io.py
@@ -0,0 +1,84 @@
+"""
+ExodusII-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.file_handler import \
+    NetCDF4FileHandler
+
+
+class IOHandlerExodusII(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type = "exodus_ii"
+    _INDEX_OFFSET = 1
+
+    def __init__(self, ds):
+        self.filename = ds.index_filename
+        exodus_ii_handler = NetCDF4FileHandler(self.filename)
+        self.handler = exodus_ii_handler.dataset
+        super(IOHandlerExodusII, self).__init__(ds)
+        self.node_fields = ds._get_nod_names()
+        self.elem_fields = ds._get_elem_names()
+
+    def _read_particle_coords(self, chunks, ptf):
+        pass
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        chunks = list(chunks)
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            ci = self.handler.variables[ftype][:] - self._INDEX_OFFSET
+            num_elem = ci.shape[0]
+            if fname in self.node_fields:
+                nodes_per_element = ci.shape[1]
+                rv[field] = np.empty((num_elem, nodes_per_element), dtype="float64")
+            elif fname in self.elem_fields:
+                rv[field] = np.empty(num_elem, dtype="float64")
+        for field in fields:
+            ind = 0
+            ftype, fname = field
+            mesh_id = int(ftype[-1])
+            chunk = chunks[mesh_id - 1]
+            ci = self.handler.variables[ftype][:] - self._INDEX_OFFSET
+            if fname in self.node_fields:
+                field_ind = self.node_fields.index(fname)
+                fdata = self.handler.variables['vals_nod_var%d' % (field_ind + 1)]
+                data = fdata[self.ds.step][ci]
+                for g in chunk.objs:
+                    ind += g.select(selector, data, rv[field], ind)  # caches
+            if fname in self.elem_fields:
+                field_ind = self.elem_fields.index(fname)
+                fdata = self.handler.variables['vals_elem_var%deb%s' %
+                                               (field_ind + 1, mesh_id)][:]
+                data = fdata[self.ds.step, :]
+                for g in chunk.objs:
+                    ind += g.select(selector, data, rv[field], ind)  # caches
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/setup.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('exodus_ii', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -0,0 +1,71 @@
+"""
+Exodus II frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    assert_array_equal, \
+    requires_file
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
+
+out = "ExodusII/out.e"
+
+
+ at requires_file(out)
+def test_out():
+    ds = data_dir_load(out)
+    field_list = [('connect1', 'conv_indicator'),
+                  ('connect1', 'conv_marker'),
+                  ('connect1', 'convected'),
+                  ('connect1', 'diffused'),
+                  ('connect2', 'conv_indicator'),
+                  ('connect2', 'conv_marker'),
+                  ('connect2', 'convected'),
+                  ('connect2', 'diffused')]
+    yield assert_equal, str(ds), "out.e"
+    yield assert_equal, ds.dimensionality, 3
+    yield assert_equal, ds.current_time, 0.0
+    yield assert_array_equal, ds.parameters['nod_names'], ['convected', 'diffused']
+    yield assert_equal, ds.parameters['num_meshes'], 2
+    yield assert_array_equal, ds.field_list, field_list 
+
+out_s002 = "ExodusII/out.e-s002"
+
+
+ at requires_file(out_s002)
+def test_out002():
+    ds = data_dir_load(out_s002)
+    field_list = [('connect1', 'conv_indicator'),
+                  ('connect1', 'conv_marker'),
+                  ('connect1', 'convected'),
+                  ('connect1', 'diffused'),
+                  ('connect2', 'conv_indicator'),
+                  ('connect2', 'conv_marker'),
+                  ('connect2', 'convected'),
+                  ('connect2', 'diffused')]
+    yield assert_equal, str(ds), "out.e-s002"
+    yield assert_equal, ds.dimensionality, 3
+    yield assert_equal, ds.current_time, 2.0
+    yield assert_array_equal, ds.field_list, field_list 
+
+gold = "ExodusII/gold.e"
+
+
+ at requires_file(gold)
+def test_gold():
+    ds = data_dir_load(gold)
+    field_list = [('connect1', 'forced')]
+    yield assert_equal, str(ds), "gold.e"
+    yield assert_array_equal, ds.field_list, field_list 

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/exodus_ii/util.py
--- /dev/null
+++ b/yt/frontends/exodus_ii/util.py
@@ -0,0 +1,58 @@
+import sys
+import string
+from itertools import takewhile
+from collections import OrderedDict
+import re
+
+_printable = set([ord(_) for _ in string.printable])
+
+def sanitize_string(s):
+    if sys.version_info > (3, ):
+        return "".join([chr(_) for _ in takewhile(lambda a: a in _printable, s)])
+    return "".join([_ for _ in takewhile(lambda a: a in string.printable, s)])
+
+def load_info_records(info_records):
+    info_records_parsed = [sanitize_string(line_chars) for line_chars in info_records]
+    return group_by_sections(info_records_parsed)
+
+def group_by_sections(info_records):
+    # 1. Split by top groupings
+    top_levels = get_top_levels(info_records)
+    # 2. Determine if in section by index number
+    grouped = OrderedDict()
+    for tidx, top_level in enumerate(top_levels):
+        grouped[top_level[1]] = []
+
+        try:
+            next_idx = top_levels[tidx + 1][0]
+        except IndexError:
+            next_idx = len(info_records) - 1
+
+        for idx in range(top_level[0], next_idx):       
+            if idx == top_level[0]:
+                continue
+
+            grouped[top_level[1]].append(info_records[idx])
+
+    
+    if 'Version Info' in grouped.keys():
+        version_info = OrderedDict()
+        for line in grouped['Version Info']:
+            split_line = line.split(":")
+            key = split_line[0]
+            val = ":".join(split_line[1:]).lstrip().rstrip()
+            if key != '':
+                version_info[key] = val
+        grouped['Version Info'] = version_info
+    
+    return grouped
+
+def get_top_levels(info_records):
+    top_levels = []
+    for idx, line in enumerate(info_records):
+        pattern = re.compile("###[a-zA-Z\s]+")
+        if pattern.match(line):
+            clean_line = re.sub(r'[^\w\s]', '', line).lstrip().rstrip()
+            top_levels.append([idx, clean_line])
+    
+    return top_levels

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -25,7 +25,7 @@
 
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
-gdg = "GadgetDiskGalaxy/snapshot_200.hdf5"
+g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -42,9 +42,9 @@
 )
 iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
 
-gdg_fields = iso_fields.copy()
-gdg_fields["deposit", "PartType4_density"] = None
-gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+g64_fields = iso_fields.copy()
+g64_fields["deposit", "PartType4_density"] = None
+g64_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
 
 
 @requires_file(isothermal_h5)
@@ -63,9 +63,9 @@
         test_iso_collapse.__name__ = test.description
         yield test
 
- at requires_ds(gdg, big_data=True)
-def test_gadget_disk_galaxy():
-    for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
-                           ds_kwargs=gdg_kwargs):
-        test_gadget_disk_galaxy.__name__ = test.description
+ at requires_ds(g64, big_data=True)
+def test_gizmo_64():
+    for test in sph_answer(g64, 'snap_N64L16_135', 524288, g64_fields,
+                           ds_kwargs=g64_kwargs):
+        test_gizmo_64.__name__ = test.description
         yield test

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -11,6 +11,7 @@
     config.add_subpackage("athena")
     config.add_subpackage("boxlib")
     config.add_subpackage("chombo")
+    config.add_subpackage("exodus_ii")
     config.add_subpackage("eagle")
     config.add_subpackage("enzo")
     config.add_subpackage("fits")
@@ -37,6 +38,7 @@
     config.add_subpackage("chombo/tests")
     config.add_subpackage("eagle/tests")
     config.add_subpackage("enzo/tests")
+    config.add_subpackage("exodus_ii/tests")
     config.add_subpackage("fits/tests")
     config.add_subpackage("flash/tests")
     config.add_subpackage("gadget/tests")
@@ -47,6 +49,7 @@
     config.add_subpackage("ramses/tests")
     config.add_subpackage("rockstar/tests")
     config.add_subpackage("stream/tests")
+    config.add_subpackage("stream/sample_data")
     config.add_subpackage("tipsy/tests")
     config.add_subpackage("ytdata/tests")
     return config

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -34,3 +34,5 @@
       IOHandlerStream
 
 from . import tests
+
+from . import sample_data

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -139,7 +139,7 @@
         self.io = io
         self.particle_types = particle_types
         self.periodicity = periodicity
-            
+
     def get_fields(self):
         return self.fields.all_fields
 
@@ -149,7 +149,7 @@
             return self.particle_types[field]
         else :
             return False
-        
+
 class StreamHierarchy(GridIndex):
 
     grid = StreamGrid
@@ -1520,9 +1520,9 @@
 
     field_units, data = unitify_data(data)
     sfh = StreamDictFieldHandler()
-    
+
     particle_types = set_particle_types(data)
-    
+
     sfh.update({0:data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
@@ -1606,12 +1606,12 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
-def load_unstructured_mesh(data, connectivity, coordinates,
-                         length_unit = None, bbox=None, sim_time=0.0,
-                         mass_unit = None, time_unit = None,
-                         velocity_unit = None, magnetic_unit = None,
-                         periodicity=(False, False, False),
-                         geometry = "cartesian", unit_system="cgs"):
+def load_unstructured_mesh(connectivity, coordinates, node_data=None,
+                           elem_data=None, length_unit=None, bbox=None,
+                           sim_time=0.0, mass_unit=None, time_unit=None,
+                           velocity_unit=None, magnetic_unit=None,
+                           periodicity=(False, False, False),
+                           geometry = "cartesian", unit_system="cgs"):
     r"""Load an unstructured mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1625,10 +1625,6 @@
 
     Parameters
     ----------
-    data : dict or list of dicts
-        This is a list of dicts of numpy arrays, where each element in the list
-        is a different mesh, and where the keys of dicts are the field names.
-        If a dict is supplied, this will be assumed to be the only mesh.
     connectivity : list of array_like or array_like
         This is the connectivity array for the meshes; this should either be a
         list where each element in the list is a numpy array or a single numpy
@@ -1638,6 +1634,18 @@
     coordinates : array_like
         This should be of size (L,3) where L is the number of vertices
         indicated in the connectivity matrix.
+    node_data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh. These
+        data fields are assumed to be node-centered, i.e. there must be one
+        value for every node in the mesh.
+    elem_data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh. These
+        data fields are assumed to be element-centered, i.e. there must be only
+        one value for every element in the mesh.
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units of the length unit.
     sim_time : float, optional
@@ -1665,8 +1673,33 @@
 
     domain_dimensions = np.ones(3, "int32") * 2
     nprocs = 1
+
+    if elem_data is None and node_data is None:
+        raise RuntimeError("No data supplied in load_unstructured_mesh.")
+
+    if isinstance(connectivity, list):
+        num_meshes = len(connectivity)
+    else:
+        num_meshes = 1
+    connectivity = ensure_list(connectivity)
+
+    if elem_data is None:
+        elem_data = [{} for i in range(num_meshes)]
+    elem_data = ensure_list(elem_data)
+
+    if node_data is None:
+        node_data = [{} for i in range(num_meshes)]
+    node_data = ensure_list(node_data)
+
+    data = [{} for i in range(num_meshes)]
+    for elem_dict, data_dict in zip(elem_data, data):
+        for field, values in elem_dict.items():
+            data_dict[field] = values
+    for node_dict, data_dict in zip(node_data, data):
+        for field, values in node_dict.items():
+            data_dict[field] = values
     data = ensure_list(data)
-    connectivity = ensure_list(connectivity)
+
     if bbox is None:
         bbox = np.array([[coordinates[:,i].min() - 0.1 * abs(coordinates[:,i].min()),
                           coordinates[:,i].max() + 0.1 * abs(coordinates[:,i].max())]
@@ -1737,5 +1770,12 @@
     sds = StreamUnstructuredMeshDataset(handler, geometry=geometry,
                                         unit_system=unit_system)
 
+    fluid_types = ()
+    for i in range(1, num_meshes + 1):
+        fluid_types += ('connect%d' % i,)
+    sds.fluid_types = fluid_types
+
+    sds._node_fields = node_data[0].keys()
+    sds._elem_fields = elem_data[0].keys()
+
     return sds
-

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -255,7 +255,6 @@
 
 class IOHandlerStreamUnstructured(BaseIOHandler):
     _dataset_type = "stream_unstructured"
-    _node_types = ("diffused", "convected", "u", "temp")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
@@ -267,9 +266,8 @@
         mesh_id = chunk.objs[0].mesh_id
         rv = {}
         for field in fields:
-            field_name = field[1]
-            nodes_per_element = self.fields[mesh_id][field].shape[1]
-            if field_name in self._node_types:
+            if field in self.ds._node_fields:
+                nodes_per_element = self.fields[mesh_id][field].shape[1]
                 rv[field] = np.empty((size, nodes_per_element), dtype="float64")
             else:
                 rv[field] = np.empty(size, dtype="float64")

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -21,6 +21,10 @@
     _get_vert_fields, \
     cartesian_to_cylindrical, \
     cylindrical_to_cartesian
+from yt.funcs import mylog
+from yt.utilities.lib.pixelization_routines import \
+    pixelize_element_mesh
+from yt.data_objects.unstructured_mesh import SemiStructuredMesh
 import yt.visualization._MPL as _MPL
 
 
@@ -59,7 +63,54 @@
 
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
-        if dimension < 3:
+        index = data_source.ds.index
+        if (hasattr(index, 'meshes') and
+           not isinstance(index.meshes[0], SemiStructuredMesh)):
+            ftype, fname = field
+            mesh_id = int(ftype[-1]) - 1
+            mesh = index.meshes[mesh_id]
+            coords = mesh.connectivity_coords
+            indices = mesh.connectivity_indices
+            offset = mesh._index_offset
+            ad = data_source.ds.all_data()
+            field_data = ad[field]
+            buff_size = size[0:dimension] + (1,) + size[dimension:]
+
+            c = np.float64(data_source.center[dimension].d)
+            bounds.insert(2*dimension, c)
+            bounds.insert(2*dimension, c)
+            bounds = np.reshape(bounds, (3, 2))
+
+            # if this is an element field, promote to 2D here
+            if len(field_data.shape) == 1:
+                field_data = np.expand_dims(field_data, 1)
+            # if this is a higher-order element, we demote to 1st order
+            # here, for now.
+            elif field_data.shape[1] == 27 or field_data.shape[1] == 20:
+                # hexahedral
+                mylog.warning("High order elements not yet supported, " +
+                              "dropping to 1st order.")
+                field_data = field_data[:, 0:8]
+                indices = indices[:, 0:8]
+            elif field_data.shape[1] == 10:
+                # tetrahedral
+                mylog.warning("High order elements not yet supported, " +
+                              "dropping to 1st order.")
+                field_data = field_data[:,0:4]
+                indices = indices[:, 0:4]
+
+            img = pixelize_element_mesh(coords,
+                                        indices,
+                                        buff_size, field_data, bounds,
+                                        index_offset=offset)
+
+            # re-order the array and squeeze out the dummy dim
+            ax = data_source.axis
+            xax = self.x_axis[ax]
+            yax = self.y_axis[ax]
+            return np.squeeze(np.transpose(img, (yax, xax, ax)))
+
+        elif dimension < 3:
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias, dimension, periodic)
         else:

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -409,6 +409,7 @@
             ind += c.shape[0]
         return ci
 
+
 class ChunkDataCache(object):
     def __init__(self, base_iter, preload_fields, geometry_handler,
                  max_length = 256):

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -397,6 +397,7 @@
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef int i, j, k, selected
         cdef int npoints, nv = mesh._connectivity_length
+        cdef int ndim = mesh.connectivity_coords.shape[1]
         cdef int total = 0
         cdef int offset = mesh._index_offset
         coords = _ensure_code(mesh.connectivity_coords)
@@ -405,11 +406,11 @@
         mask = np.zeros(npoints, dtype='uint8')
         for i in range(npoints):
             selected = 0
-            for k in range(3):
+            for k in range(ndim):
                 le[k] = 1e60
                 re[k] = -1e60
             for j in range(nv):
-                for k in range(3):
+                for k in range(ndim):
                     pos = coords[indices[i, j] - offset, k]
                     le[k] = fmin(pos, le[k])
                     re[k] = fmax(pos, re[k])

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -268,6 +268,50 @@
     return ds
 
 
+def fake_tetrahedral_ds():
+    from yt.frontends.stream.api import load_unstructured_mesh
+    from yt.frontends.stream.sample_data.tetrahedral_mesh import \
+        _connectivity, _coordinates
+
+    # the distance from the origin
+    node_data = {}
+    dist = np.sum(_coordinates**2, 1)
+    node_data[('connect1', 'test')] = dist[_connectivity]
+
+    # each element gets a random number
+    elem_data = {}
+    elem_data[('connect1', 'elem')] = np.random.rand(_connectivity.shape[0])
+
+    ds = load_unstructured_mesh(_connectivity,
+                                _coordinates,
+                                node_data=node_data,
+                                elem_data=elem_data)
+    return ds
+
+
+def fake_hexahedral_ds():
+    from yt.frontends.stream.api import load_unstructured_mesh
+    from yt.frontends.stream.sample_data.hexahedral_mesh import \
+        _connectivity, _coordinates
+
+    _connectivity -= 1  # this mesh has an offset of 1
+
+    # the distance from the origin
+    node_data = {}
+    dist = np.sum(_coordinates**2, 1)
+    node_data[('connect1', 'test')] = dist[_connectivity]
+
+    # each element gets a random number
+    elem_data = {}
+    elem_data[('connect1', 'elem')] = np.random.rand(_connectivity.shape[0])
+
+    ds = load_unstructured_mesh(_connectivity,
+                                _coordinates,
+                                node_data=node_data,
+                                elem_data=elem_data)
+    return ds
+
+
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword

diff -r fdf87b9e254b64e4020a24f3273a8382c3558f57 -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -8,14 +8,15 @@
 @requires_module('flake8')
 def test_flake8():
     yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
-    initial_dir = os.getcwd()
-    os.chdir(yt_dir)
-    output_file = os.path.sep.join([os.path.dirname(initial_dir), 'flake8.out'])
+    output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+    output_file = os.path.join(output_file, 'flake8.out')
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
-    subprocess.call(['flake8', output_string, os.curdir])
-    os.chdir(initial_dir)
+    config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir), 
+                                                 'setup.cfg')
+    subprocess.call(['flake8', output_string, config_string, yt_dir])
+    
     with open(output_file) as f:
         flake8_output = f.readlines()
     if flake8_output != []:

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/59afad399bb1/
Changeset:   59afad399bb1
Branch:      yt
User:        jzuhone
Date:        2015-12-19 15:12:59+00:00
Summary:     Some docs on unit systems
Affected #:  2 files

diff -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 -r 59afad399bb1c9574b4919e10e5daf4488718217 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -94,16 +94,16 @@
 
 There is a third, borderline class of field in yt, as well.  This is the
 "alias" type, where a field on disk (for example, (frontend, ``Density``)) is 
-aliased into an internal yt-name (for example, (``gas``, ``density``)).  The 
+aliased into an internal yt-name (for example, (``gas``, ``density``)). The 
 aliasing process allows universally-defined derived fields to take advantage of 
 internal names, and it also provides an easy way to address what units something 
 should be returned in.  If an aliased field is requested (and aliased fields 
 will always be lowercase, with underscores separating words) it will be returned 
-in CGS units (future versions will enable global defaults to be set for MKS and 
-other unit systems), whereas if the frontend-specific field is requested, it 
-will not undergo any unit conversions from its natural units.  (This rule is 
-occasionally violated for fields which are mesh-dependent, specifically particle 
-masses in some cosmology codes.)
+in the units specified by the unit system of the dataset (see :ref:`unit_systems`
+for a guide to using the different unit systems in yt), whereas if the 
+frontend-specific field is requested, it will not undergo any unit conversions 
+from its natural units.  (This rule is occasionally violated for fields which 
+are mesh-dependent, specifically particle masses in some cosmology codes.)
 
 .. _known-field-types:
 
@@ -125,7 +125,8 @@
 * ``gas`` -- This is the usual default for simulation frontends for fluid
   types.  These fields are typically aliased to the frontend-specific mesh
   fields for grid-based codes or to the deposit fields for particle-based
-  codes.  Default units are in CGS.
+  codes.  Default units are in the unit system of the dataset (see 
+  :ref:`unit_systems` for more information).
 * particle type -- These are particle fields that exist on-disk as written 
   by individual frontends.  If the frontend designates names for these particles
   (i.e. particle type) those names are the field types. 
@@ -238,6 +239,36 @@
    print ds.field_info["gas", "pressure"].get_units()
    print ds.field_info["gas", "pressure"].get_source()
 
+.. _bfields:
+
+Special Handling for Magnetic Fields
+------------------------------------
+
+Magnetic fields require special handling, because their dimensions are different in
+different systems of units, in particular between the CGS and MKS (SI) systems of units.
+Superficially, it would appear that it is easy to convert between the two, since the units 
+of the magnetic field in the CGS and MKS system are gauss (:math:`\rm{G}`) and tesla 
+(:math:`\rm{T}`), respectively, and numerically :math:`1~\rm{G} = 10^{-4}~\rm{T}`. However, 
+if we examine the base units, we find that they have different dimensions:
+
+.. math::
+
+    \rm{1~G = 1~\frac{\sqrt{g}}{\sqrt{cm}\cdot{s}}} \\
+    \rm{1~T = 1~\frac{kg}{A\cdot{s^2}}}
+
+It is easier to see the difference between the dimensionality of the magnetic field in the two
+systems in terms of the definition of the magnetic pressure:
+
+.. math::
+
+    p_B = \frac{B^2}{8\pi}~\rm{(cgs)} \\
+    p_B = \frac{B^2}{2\mu_0}~\rm{(MKS)}
+
+where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically
+detects on a per-frontend basis what units the magnetic field should be in, and allows conversion between 
+different magnetic field units in the different :ref:`unit_systems` as well. To determine how to
+set up special magnetic field handling when designing a new frontend, check out :ref:`bfields_frontend`.
+
 Particle Fields
 ---------------
 

diff -r eef7a06b34be4c2b9409310f6b717974a49fc3a5 -r 59afad399bb1c9574b4919e10e5daf4488718217 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,6 +104,43 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
+.. _bfields_frontend:
+
+Special Handling for Magnetic Fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Setting up access to the magnetic fields in your dataset requires special
+handling, because in different unit systems magnetic fields have different
+dimensions (see :ref:`bfields` for an explanation). If your dataset includes 
+magnetic fields, you should include them in ``known_other_fields``, but do
+not set up aliases for them--instead use the special handling function 
+:meth:`~yt.fields.magnetic_field.setup_magnetic_field_aliases`. It takes
+as arguments the ``FieldInfoContainer`` instance, the field type of the 
+frontend, and the list of magnetic fields from the frontend. Here is an
+example of how this is implemented in the FLASH frontend:
+
+.. code-block:: python
+
+    class FLASHFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ...
+            ("magx", (b_units, [], "B_x")), # Note there is no alias here
+            ("magy", (b_units, [], "B_y")),
+            ("magz", (b_units, [], "B_z")),
+            ...
+        )
+
+        def setup_fluid_fields(self):
+            from yt.fields.magnetic_field import \
+                setup_magnetic_field_aliases
+            ...
+            setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"])    
+
+This function should always be imported and called from within the 
+``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this 
+function is used, converting between magnetic fields in different 
+:ref:`unit_systems` will be handled automatically. 
+
 Data Localization Structures
 ----------------------------
 


https://bitbucket.org/yt_analysis/yt/commits/2d122cf29e32/
Changeset:   2d122cf29e32
Branch:      yt
User:        jzuhone
Date:        2015-12-19 17:07:12+00:00
Summary:     If we set units="auto" in add_field we must now specify dimensions
Affected #:  4 files

diff -r 59afad399bb1c9574b4919e10e5daf4488718217 -r 2d122cf29e32425d5f0fd69bc4833d63a756a845 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -33,7 +33,10 @@
 In this example, the ``density`` field will return data with units of
 ``g/cm**3`` and the ``thermal_energy`` field will return data units of
 ``erg/g``, so the result will automatically have units of pressure,
-``erg/cm**3``.
+``erg/cm**3``. This assumes the unit system is set to the default, which is
+CGS: if a different unit system is selected, the result will be in the same
+dimensions as pressure but different units. See :ref:`unit_systems` for more
+information.
 
 Once we've defined our function, we need to notify yt that the field is
 available.  The :func:`add_field` function is the means of doing this; it has a
@@ -47,7 +50,7 @@
 
 .. code-block:: python
 
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   yt.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
 We feed it the name of the field, the name of the function, and the
 units.  Note that the units parameter is a "raw" string, in the format that yt 
@@ -67,7 +70,7 @@
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
 
-If your field definition influcdes physical constants rather than defining a
+If your field definition includes physical constants rather than defining a
 constant as a float, you can import it from ``yt.utilities.physical_constants``
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
@@ -82,7 +85,29 @@
 Lastly, if you do not know the units of your field ahead of time, you can
 specify ``units='auto'`` in the call to ``add_field`` for your field.  This will
 automatically determine the appropriate units based on the units of the data
-returned by the field function.
+returned by the field function. This is also a good way to let your derived fields
+be automatically converted to the units of the :ref:`unit system <unit_systems>` in 
+your dataset. 
+
+If ``units='auto'`` is set, it is also required to set the ``dimensions`` keyword
+argument so that error-checking can be done on the derived field to make sure that
+the dimensionality of the returned array and the field are the same:
+
+.. code-block:: python
+
+    import yt
+    from yt.units import dimensions
+    
+    def _pressure(field, data):
+        return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+              
+    yt.add_field(("gas","pressure"), function=_pressure, units="auto",
+                 dimensions=dimensions.pressure)
+
+If ``dimensions`` is not set, an error will be thrown. The ``dimensions`` keyword
+can be a SymPy ``symbol`` object imported from ``yt.units.dimensions``, a compound
+dimension of these, or a string corresponding to one of these objects. 
 
 :func:`add_field` can be invoked in two other ways. The first is by the 
 function decorator :func:`derived_field`. The following code is equivalent to 
@@ -111,7 +136,15 @@
 .. code-block:: python
 
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   ds.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
+
+If you specify fields in this way, you can take advantage of the dataset's 
+:ref:`unit system <unit_systems>` to define the units for you, so that
+the units will be returned in the units of that system:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "pressure"), function=_pressure, units=ds.unit_system["pressure"])
 
 If you find yourself using the same custom-defined fields over and over, you
 should put them in your plugins file as described in :ref:`plugin-file`.
@@ -148,7 +181,7 @@
        y_hat /= r
        z_hat /= r
        return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
+   yt.add_field(("gas","my_radial_velocity"),
                 function=_my_radial_velocity,
                 units="cm/s",
                 take_log=False,
@@ -195,8 +228,11 @@
 ``function``
      This is a function handle that defines the field
 ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
+     This is a string that describes the units, or a query to a :ref:`UnitSystem <unit_systems>` 
+     object, e.g. ``ds.unit_system["energy"]``. Powers must be in Python syntax (``**`` 
+     instead of ``^``). Alternatively, it may be set to ``"auto"`` to have the units 
+     determined automatically. In this case, the ``dimensions`` keyword must be set to the
+     correct dimensions of the field. 
 ``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
@@ -219,6 +255,9 @@
 ``force_override``
      (*Advanced*) Overrides the definition of an old field if a field with the
      same name has already been defined.
+``dimensions``
+     Set this if ``units="auto"``. Can be either a string or a dimension object from
+     ``yt.units.dimensions``.
 
 Debugging a Derived Field
 -------------------------

diff -r 59afad399bb1c9574b4919e10e5daf4488718217 -r 2d122cf29e32425d5f0fd69bc4833d63a756a845 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -45,7 +45,8 @@
     YTFieldNotParseable, \
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
-    YTDataSelectorNotImplemented
+    YTDataSelectorNotImplemented, \
+    YTDimensionalityError
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -1206,8 +1207,10 @@
                         # infer the units from the units of the data we get back
                         # from the field function and use these units for future
                         # field accesses
-                        units = str(getattr(fd, 'units', ''))
-                        fi.units = units
+                        units = getattr(fd, 'units', '')
+                        if fi.dimensions != units.dimensions:
+                            raise YTDimensionalityError(fi.dimensions, units.dimensions)
+                        fi.units = str(units)
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")

diff -r 59afad399bb1c9574b4919e10e5daf4488718217 -r 2d122cf29e32425d5f0fd69bc4833d63a756a845 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -28,6 +28,7 @@
     FieldDetector
 from yt.units.unit_object import \
     Unit
+import yt.units.dimensions as ytdims
 from yt.utilities.exceptions import \
     YTFieldNotFound
 
@@ -55,9 +56,11 @@
        A function handle that defines the field.  Should accept
        arguments (field, data)
     units : str
-       A plain text string encoding the unit.  Powers must be in
-       python syntax (** instead of ^). If set to "auto" the units will be
-       inferred from the units of the return value of the field function.
+       A plain text string encoding the unit, or a query to a unit system of
+       a dataset. Powers must be in python syntax (** instead of ^). If set
+       to "auto" the units will be inferred from the units of the return
+       value of the field function, and the dimensions keyword must also be
+       set (see below).
     take_log : bool
        Describes whether the field should be logged
     validators : list
@@ -76,11 +79,15 @@
        For fields that exist on disk, which we may want to convert to other
        fields or that get aliased to themselves, we can specify a different
        desired output unit than the unit found on disk.
+    dimensions : str or object from yt.units.dimensions
+       The dimensions of the field, only needed if units="auto" and only used
+       for error checking.
     """
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None, output_units=None):
+                 not_in_all=False, display_name=None, output_units=None,
+                 dimensions=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -101,6 +108,9 @@
             self.units = ''
         elif isinstance(units, string_types):
             if units.lower() == 'auto':
+                if dimensions is None:
+                    raise RuntimeError("To set units='auto', please specify the dimensions "
+                                       "of the field with dimensions=<dimensions of field>!")
                 self.units = None
             else:
                 self.units = units
@@ -114,6 +124,10 @@
             output_units = self.units
         self.output_units = output_units
 
+        if isinstance(dimensions, string_types):
+            dimensions = getattr(ytdims, dimensions)
+        self.dimensions = dimensions
+
     def _copy_def(self):
         dd = {}
         dd['name'] = self.name

diff -r 59afad399bb1c9574b4919e10e5daf4488718217 -r 2d122cf29e32425d5f0fd69bc4833d63a756a845 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -19,7 +19,8 @@
     YTArray, YTQuantity
 from yt.utilities.exceptions import \
     YTFieldUnitError, \
-    YTFieldUnitParseError
+    YTFieldUnitParseError, \
+    YTDimensionalityError
 
 base_ds = None
 
@@ -250,23 +251,25 @@
 
     ds.add_field(('gas','density_alias_no_units'), function=density_alias)
     ds.add_field(('gas','density_alias_auto'), function=density_alias,
-                 units='auto')
+                 units='auto', dimensions='density')
     ds.add_field(('gas','density_alias_wrong_units'), function=density_alias,
                  units='m/s')
     ds.add_field(('gas','density_alias_unparseable_units'), function=density_alias,
                  units='dragons')
-
+    ds.add_field(('gas','density_alias_auto_wrong_dims'), function=density_alias,
+                 units='auto', dimensions="temperature")
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_no_units')
     assert_raises(YTFieldUnitError, get_data, ds, 'density_alias_wrong_units')
     assert_raises(YTFieldUnitParseError, get_data, ds,
                   'density_alias_unparseable_units')
+    assert_raises(YTDimensionalityError, get_data, ds, 'density_alias_auto_wrong_dims')
 
     dens = ad['density_alias_auto']
     assert_equal(str(dens.units), 'g/cm**3')
 
     ds.add_field(('gas','dimensionless'), function=unitless_data)
     ds.add_field(('gas','dimensionless_auto'), function=unitless_data,
-                 units='auto')
+                 units='auto', dimensions='dimensionless')
     ds.add_field(('gas','dimensionless_explicit'), function=unitless_data, units='')
     ds.add_field(('gas','dimensionful'), function=unitless_data, units='g/cm**3')
 


https://bitbucket.org/yt_analysis/yt/commits/cc6373fcdde2/
Changeset:   cc6373fcdde2
Branch:      yt
User:        jzuhone
Date:        2015-12-19 18:05:17+00:00
Summary:     Patching up auto fields to work
Affected #:  1 file

diff -r 2d122cf29e32425d5f0fd69bc4833d63a756a845 -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1210,12 +1210,11 @@
                         units = getattr(fd, 'units', '')
                         if fi.dimensions != units.dimensions:
                             raise YTDimensionalityError(fi.dimensions, units.dimensions)
-                        fi.units = str(units)
+                        fi.units = str(units.get_base_equivalent(self.ds.unit_system.name))
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
                         mylog.warn(msg % (fi.name, units))
-                        continue
                     try:
                         fd.convert_to_units(fi.units)
                     except AttributeError:


https://bitbucket.org/yt_analysis/yt/commits/8f65f7661b78/
Changeset:   8f65f7661b78
Branch:      yt
User:        jzuhone
Date:        2015-12-19 18:21:40+00:00
Summary:     More unit system docs
Affected #:  7 files

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -241,15 +241,15 @@
 
 .. _bfields:
 
-Special Handling for Magnetic Fields
-------------------------------------
+Magnetic Fields
+---------------
 
 Magnetic fields require special handling, because their dimensions are different in
 different systems of units, in particular between the CGS and MKS (SI) systems of units.
-Superficially, it would appear that it is easy to convert between the two, since the units 
+Superficially, it would appear that they are in the same dimensions, since the units 
 of the magnetic field in the CGS and MKS system are gauss (:math:`\rm{G}`) and tesla 
 (:math:`\rm{T}`), respectively, and numerically :math:`1~\rm{G} = 10^{-4}~\rm{T}`. However, 
-if we examine the base units, we find that they have different dimensions:
+if we examine the base units, we find that they do indeed have different dimensions:
 
 .. math::
 
@@ -266,8 +266,9 @@
 
 where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically
+detects on a per-frontend basis what units the magnetic field should be in, and allows conversion between 
-different magnetic field units in the different :ref:`unit_systems` as well. To determine how to
-set up special magnetic field handling when designing a new frontend, check out :ref:`bfields_frontend`.
+different magnetic field units in the different :ref:`unit systems <unit_systems>` as well. To 
+determine how to set up special magnetic field handling when designing a new frontend, check out 
+:ref:`bfields_frontend`.
 
 Particle Fields
 ---------------

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:6a06d5720eb6316ac0d322ef0898ec20f33d65ea3eeeacef35ae1d869af12607"
+  "signature": "sha256:3f07f66604a670af4a8b4dd463f56a5f83ca87b570efeea95e024d02364b15f5"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -107,14 +107,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
+      "`YTArray` defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
       "* `in_mks`\n",
+      "* `in_base`\n",
       "* `convert_to_units`\n",
       "* `convert_to_cgs`\n",
-      "* `convert_to_mks`"
+      "* `convert_to_mks`\n",
+      "* `convert_to_base`"
      ]
     },
     {
@@ -157,7 +159,51 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The next two methods do in-place conversions:"
+      "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n",
+      "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n",
+      "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n",
+      "print (dd['pressure'].in_base()) # defaults to cgs if no argument given"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The rest of the methods do in-place conversions:"
      ]
     },
     {
@@ -201,7 +247,7 @@
      "source": [
       "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n",
       "\n",
-      "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy.  This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
+      "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by SymPy.  This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
      ]
     },
     {

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -0,0 +1,449 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:d5d9b704673851beaedcc3bf4bda890eb6ef3e938e2b127cd10bc55d2b79384c"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n",
+      "\n",
+      "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Unit Systems Available in yt"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Several unit systems are already supplied for use within yt. They are:\n",
+      "\n",
+      "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n",
+      "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n",
+      "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n",
+      "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n",
+      "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n",
+      "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n",
+      "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n",
+      "\n",
+      "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "yt.unit_system_registry[\"cgs\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n",
+      "\n",
+      "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "yt.unit_system_registry[\"mks\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also look at the imperial system:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "yt.unit_system_registry[\"imperial\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and the \"galactic\" system as well:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "yt.unit_system_registry[\"galactic\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Converting `YTArrays` to the Different Unit Systems"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Choosing a Unit System When Loading a Dataset"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. The default is `\"cgs\"`.\n",
+      "\n",
+      "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n",
+      "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n",
+      "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n",
+      "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n",
+      "print (sp[\"angular_momentum_x\"]) # This is a derived field\n",
+      "print (sp[\"kinetic_energy\"]) # This is also a derived field"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n",
+      "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can take an `Enzo` dataset and express it in `\"galactic\"` units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n",
+      "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n",
+      "print (sp[\"density\"])\n",
+      "print (sp[\"pressure\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n",
+      "dd = ds_chombo.all_data()\n",
+      "print (dd[\"density\"])\n",
+      "print (dd[\"kinetic_energy\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Defining Fields So That They Can Use the Different Unit Systems"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n",
+      "def _momentum_x(field, data):\n",
+      "    return data[\"density\"]*data[\"velocity_x\"]\n",
+      "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, the field will automatically be expressed in whatever units the dataset was called with. In this case, it was MKS:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.units import clight\n",
+      "\n",
+      "def _rest_energy(field, data):\n",
+      "    return data[\"cell_mass\"]*clight*clight\n",
+      "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n",
+      "\n",
+      "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n",
+      "\n",
+      "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n",
+      "sp[\"rest_energy\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Obtaining Physical Constants in a Specific Unit System"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n",
+      "    unit_system = yt.unit_system_registry[name]\n",
+      "    print (name, unit_system.constants.G)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Defining Your Own Unit System"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "yt.unit_system_registry[\"small\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n",
+      "wacky_unit_system"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n",
+      "mksish_unit_system"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. To do this, set these to be the units of the `\"pressure\"` dimension explicitly:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "small_unit_system[\"pressure\"] = \"udyne/mm**2\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "small_unit_system"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same for a few other dimensionalities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n",
+      "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n",
+      "small_unit_system[\"velocity\"] = \"cm/s\"\n",
+      "small_unit_system"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -34,6 +34,7 @@
    comparing_units_from_different_datasets
    units_and_plotting
    unit_equivalencies
+   unit_systems
 
 .. note::
 

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/analyzing/units/unit_systems.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_systems.rst
@@ -0,0 +1,7 @@
+.. _unit_systems:
+
+Unit Systems
+============
+
+.. notebook:: 7)_Unit_Systems.ipynb
+:skip_exceptions:

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -62,7 +62,7 @@
 as in the ``_pressure`` example above.
 
 Field definitions return array data with units. If the field function returns
-data in a dimensionally equivalent unit (e.g. a ``dyne`` versus a ``N``), the
+data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
 with dimensions that are incompatible with units specified in ``add_field``,
@@ -275,7 +275,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        return data["sillythings"] + data["humorousthings"]**2.0
 
@@ -283,7 +283,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        data._debug()
        return data["sillythings"] + data["humorousthings"]**2.0

diff -r cc6373fcdde22ef5b2161b2bb54f3fe63aed2bdb -r 8f65f7661b789daa271903d63e278f2419b3a26e doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,9 +104,9 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
-.. _bfields:
+.. _bfields_frontend:
 
-Special Handling for Magnetic Fields
+Creating Aliases for Magnetic Fields
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Setting up access to the magnetic fields in your dataset requires special
@@ -139,7 +139,7 @@
 This function should always be imported and called from within the 
 ``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this 
 function is used, converting between magnetic fields in different 
-:ref:`unit_systems` will be handled automatically. 
+:ref:`unit systems <unit_systems>` will be handled automatically. 
 
 Data Localization Structures
 ----------------------------


https://bitbucket.org/yt_analysis/yt/commits/83431b243911/
Changeset:   83431b243911
Branch:      yt
User:        jzuhone
Date:        2015-12-19 18:45:31+00:00
Summary:     Fixing refs
Affected #:  2 files

diff -r 8f65f7661b789daa271903d63e278f2419b3a26e -r 83431b243911326e04cbb6a0e32ded509b9aacd4 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -268,7 +268,7 @@
 detects on a per-frontend basis what units the magnetic field should be in, and allows conversion between 
 different magnetic field units in the different :ref:`unit systems <unit_systems>` as well. To 
 determine how to set up special magnetic field handling when designing a new frontend, check out 
-:ref:`bfields_frontend`.
+:ref:`bfields-frontend`.
 
 Particle Fields
 ---------------

diff -r 8f65f7661b789daa271903d63e278f2419b3a26e -r 83431b243911326e04cbb6a0e32ded509b9aacd4 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,7 +104,7 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
-.. _bfields_frontend:
+.. _bfields-frontend:
 
 Creating Aliases for Magnetic Fields
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^


https://bitbucket.org/yt_analysis/yt/commits/125dd2b81632/
Changeset:   125dd2b81632
Branch:      yt
User:        jzuhone
Date:        2015-12-20 14:24:50+00:00
Summary:     Cleaning up references in the docs about everything having to be in CGS units.
Affected #:  4 files

diff -r 83431b243911326e04cbb6a0e32ded509b9aacd4 -r 125dd2b81632371b12e52ae001258639d81cf4ef doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:273a23e3a20b277a9e5ea7117b48cf19013c331d0893e6e9d21896e97f59aceb"
+  "signature": "sha256:b94d6334db51f30f01fd031be52de8e80e878d396366bb24efd5f4e122a831d9"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -12,7 +12,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional arrays instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
+      "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how units are implemented in a dataset, this could add an element of uncertainty when we compare dimensional arrays instances from different datasets.  Fortunately, this is not a problem for `YTArray` since all `YTArray` code units are defined in terms of physical CGS units.\n",
       "\n",
       "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation."
      ]
@@ -117,4 +117,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 83431b243911326e04cbb6a0e32ded509b9aacd4 -r 125dd2b81632371b12e52ae001258639d81cf4ef doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5a62a9f151e691e242c1f5043e9211e166d70fd35a83f61278083c361fb07f12"
+  "signature": "sha256:d18be0966641800d5a33a0f70ee686bea930c6e585542bf60ee3f9d1102d2699"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -638,7 +638,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "* Units will be incorrect unless the data has already been converted to cgs.\n",
       "* Particles may be difficult to integrate.\n",
       "* Data must already reside in memory before loading it in to yt, whether it is generated at runtime or loaded from disk. \n",
       "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",

diff -r 83431b243911326e04cbb6a0e32ded509b9aacd4 -r 125dd2b81632371b12e52ae001258639d81cf4ef doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -742,7 +742,8 @@
 
 .. rubric:: Caveats
 
-* Please be careful that the units are correctly utilized; yt assumes cgs.
+* Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
+  other :ref:`unit systems <unit_systems>` is also possible. 
 
 .. _loading-gadget-data:
 
@@ -1004,7 +1005,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Some functions may behave oddly, and parallelism will be disappointing or
   non-existent in most cases.
 * No consistency checks are performed on the index
@@ -1062,7 +1062,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
@@ -1115,7 +1114,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
@@ -1169,7 +1167,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.

diff -r 83431b243911326e04cbb6a0e32ded509b9aacd4 -r 125dd2b81632371b12e52ae001258639d81cf4ef doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -3084,10 +3084,10 @@
       def _vorticity_x(field, data):
           f  = (data[ftype, "velocity_z"][sl_center,sl_right,sl_center] -
                 data[ftype, "velocity_z"][sl_center,sl_left,sl_center]) \
-                / (div_fac*just_one(data["index", "dy"]).in_cgs())
+                / (div_fac*just_one(data["index", "dy"]))
           f -= (data[ftype, "velocity_y"][sl_center,sl_center,sl_right] -
                 data[ftype, "velocity_y"][sl_center,sl_center,sl_left]) \
-                / (div_fac*just_one(data["index", "dz"].in_cgs()))
+                / (div_fac*just_one(data["index", "dz"]))
           new_field = data.ds.arr(np.zeros_like(data[ftype, "velocity_z"],
                                                 dtype=np.float64),
                                   f.units)
@@ -3220,7 +3220,7 @@
       def _cylindrical_r(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'cylindrical_theta')
@@ -3251,7 +3251,7 @@
       def _cylindrical_z(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'disk_angle')
@@ -3424,7 +3424,7 @@
 
       def _spherical_r(field, data):
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
+          return data.ds.arr(get_sph_r(coords), "code_length").in_base(unit_system.name)
   
 
 ('index', 'spherical_theta')


https://bitbucket.org/yt_analysis/yt/commits/dbf9099106a6/
Changeset:   dbf9099106a6
Branch:      yt
User:        jzuhone
Date:        2015-12-20 16:01:10+00:00
Summary:     Bug fix when things are dimensionless
Affected #:  1 file

diff -r 125dd2b81632371b12e52ae001258639d81cf4ef -r dbf9099106a620559d8a5da6f640fd91e9705396 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+import yt.units.dimensions as ytdims
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
     YTFieldUnitError, \
@@ -1208,8 +1209,12 @@
                         # from the field function and use these units for future
                         # field accesses
                         units = getattr(fd, 'units', '')
-                        if fi.dimensions != units.dimensions:
-                            raise YTDimensionalityError(fi.dimensions, units.dimensions)
+                        if units == '':
+                            dimensions = ytdims.dimensionless
+                        else:
+                            dimensions = units.dimensions
+                        if fi.dimensions != dimensions:
+                            raise YTDimensionalityError(fi.dimensions, dimensions)
                         fi.units = str(units.get_base_equivalent(self.ds.unit_system.name))
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "


https://bitbucket.org/yt_analysis/yt/commits/f6b8dc0e9dc9/
Changeset:   f6b8dc0e9dc9
Branch:      yt
User:        jzuhone
Date:        2015-12-20 17:45:26+00:00
Summary:     bugfix
Affected #:  1 file

diff -r dbf9099106a620559d8a5da6f640fd91e9705396 -r f6b8dc0e9dc98f05c37af4bc8878eb25f8859916 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1211,11 +1211,13 @@
                         units = getattr(fd, 'units', '')
                         if units == '':
                             dimensions = ytdims.dimensionless
+                            units = 'dimensionless'
                         else:
                             dimensions = units.dimensions
+                            units = str(units.get_base_equivalent(self.ds.unit_system.name))
                         if fi.dimensions != dimensions:
                             raise YTDimensionalityError(fi.dimensions, dimensions)
-                        fi.units = str(units.get_base_equivalent(self.ds.unit_system.name))
+                        fi.units = units
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")


https://bitbucket.org/yt_analysis/yt/commits/df2f4ef0f1de/
Changeset:   df2f4ef0f1de
Branch:      yt
User:        jzuhone
Date:        2015-12-20 19:35:13+00:00
Summary:     Another bugfix
Affected #:  1 file

diff -r f6b8dc0e9dc98f05c37af4bc8878eb25f8859916 -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1211,7 +1211,6 @@
                         units = getattr(fd, 'units', '')
                         if units == '':
                             dimensions = ytdims.dimensionless
-                            units = 'dimensionless'
                         else:
                             dimensions = units.dimensions
                             units = str(units.get_base_equivalent(self.ds.unit_system.name))


https://bitbucket.org/yt_analysis/yt/commits/d2d293eb7a25/
Changeset:   d2d293eb7a25
Branch:      yt
User:        jzuhone
Date:        2016-01-28 20:41:09+00:00
Summary:     Merge from upstream
Affected #:  191 files

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
-    """Insert an evaluated notebook into a document
-
-    This uses runipy and nbconvert to transform a path to an unevaluated notebook
-    into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 1
-    optional_arguments = 1
-    option_spec = {'skip_exceptions': directives.flag}
-    final_argument_whitespace = True
-
-    def run(self): # check if there are spaces in the notebook name
-        nb_path = self.arguments[0]
-        if ' ' in nb_path: raise ValueError(
-            "Due to issues with docutils stripping spaces from links, white "
-            "space is not allowed in notebook filenames '{0}'".format(nb_path))
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        # get path to notebook
-        nb_filename = self.arguments[0]
-        nb_basename = os.path.basename(nb_filename)
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
-        # Move files around.
-        rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
-        dest_path = os.path.join(dest_dir, nb_basename)
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Ensure desination build directory exists
-        thread_safe_mkdir(os.path.dirname(dest_path))
-
-        # Copy unevaluated notebook
-        shutil.copyfile(nb_abs_path, dest_path)
-
-        # Construct paths to versions getting copied over
-        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
-        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
-        rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
-        rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
-        # Create python script version
-        script_text = nb_to_python(nb_abs_path)
-        f = open(dest_path_script, 'w')
-        f.write(script_text.encode('utf8'))
-        f.close()
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        ret = evaluate_notebook(
-            nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
-        try:
-            evaluated_text, resources = ret
-            evaluated_text = write_notebook_output(
-                resources, image_dir, image_rel_dir, evaluated_text)
-        except ValueError:
-            # This happens when a notebook raises an unhandled exception
-            evaluated_text = ret
-
-        # Create link to notebook and script files
-        link_rst = "(" + \
-                   formatted_link(nb_basename) + "; " + \
-                   formatted_link(rel_path_eval) + "; " + \
-                   formatted_link(rel_path_script) + \
-                   ")"
-
-        self.state_machine.insert_input([link_rst], rst_file)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # add dependency
-        self.state.document.settings.record_dependencies.add(nb_abs_path)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-
-class notebook_node(nodes.raw):
-    pass
-
-def nb_to_python(nb_path):
-    """convert notebook to python script"""
-    exporter = python.PythonExporter()
-    output, resources = exporter.from_filename(nb_path)
-    return output
-
-def nb_to_html(nb_path):
-    """convert notebook to html"""
-    c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
-    exporter = html.HTMLExporter(template_file='full', config=c)
-    notebook = nbformat.read(open(nb_path), 'json')
-    output, resources = exporter.from_notebook_node(notebook)
-    header = output.split('<head>', 1)[1].split('</head>',1)[0]
-    body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
-    # http://imgur.com/eR9bMRH
-    header = header.replace('<style', '<style scoped="scoped"')
-    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n',
-                            '')
-    header = header.replace("code,pre{", "code{")
-
-    # Filter out styles that conflict with the sphinx theme.
-    filter_strings = [
-        'navbar',
-        'body{',
-        'alert{',
-        'uneditable-input{',
-        'collapse{',
-    ]
-
-    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
-    line_begin = [
-        'pre{',
-        'p{margin'
-    ]
-
-    filterfunc = lambda x: not any([s in x for s in filter_strings])
-    header_lines = filter(filterfunc, header.split('\n'))
-
-    filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
-    header_lines = filter(filterfunc, header_lines)
-
-    header = '\n'.join(header_lines)
-
-    # concatenate raw html lines
-    lines = ['<div class="ipynotebook">']
-    lines.append(header)
-    lines.append(body)
-    lines.append('</div>')
-    return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
-    # Create evaluated version and save it to the dest path.
-    notebook = nbformat.read(open(nb_path), 'json')
-    nb_runner = NotebookRunner(notebook, pylab=False)
-    try:
-        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
-    except NotebookError as e:
-        print('')
-        print(e)
-        # Return the traceback, filtering out ANSI color codes.
-        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
-        return "Notebook conversion failed with the " \
-               "following traceback: \n%s" % \
-            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
-                   str(e))
-
-    if dest_path is None:
-        dest_path = 'temp_evaluated.ipynb'
-    nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
-    ret = nb_to_html(dest_path)
-    if dest_path is 'temp_evaluated.ipynb':
-        os.remove(dest_path)
-    return ret
-
-def formatted_link(path):
-    return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
-    self.visit_raw(node)
-
-def depart_notebook_node(self, node):
-    self.depart_raw(node)
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook', NotebookDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def make_image_dir(setup, rst_dir):
-    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
-    rel_dir = os.path.relpath(setup.confdir, rst_dir)
-    image_rel_dir = rel_dir + os.path.sep + '_images'
-    thread_safe_mkdir(image_dir)
-    return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
-    my_uuid = uuid.uuid4().hex
-
-    for output in resources['outputs']:
-        new_name = image_dir + os.path.sep + my_uuid + output
-        new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
-        evaluated_text = evaluated_text.replace(output, new_relative_name)
-        with open(new_name, 'wb') as f:
-            f.write(resources['outputs'][output])
-    return evaluated_text
-
-def thread_safe_mkdir(dirname):
-    try:
-        os.makedirs(dirname)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-        pass

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
-    notebook_node, visit_notebook_node, depart_notebook_node, \
-    evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
-    """Insert an evaluated notebook cell into a document
-
-    This uses runipy and nbconvert to transform an inline python
-    script into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 0
-    optional_arguments = 1
-    has_content = True
-    option_spec = {'skip_exceptions': directives.flag}
-
-    def run(self):
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Construct notebook from cell content
-        content = "\n".join(self.content)
-        with open("temp.py", "w") as f:
-            f.write(content)
-
-        convert_to_ipynb('temp.py', 'temp.ipynb')
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        evaluated_text, resources = evaluate_notebook(
-            'temp.ipynb', skip_exceptions=skip_exceptions)
-
-        evaluated_text = write_notebook_output(
-            resources, image_dir, image_rel_dir, evaluated_text)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook-cell', NotebookCellDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
-    with io.open(py_file, 'r', encoding='utf-8') as f:
-        notebook = current.reads(f.read(), format='py')
-    with io.open(ipynb_file, 'w', encoding='utf-8') as f:
-        current.write(notebook, f, format='ipynb')

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/extensions/pythonscript_sphinxext.py
--- a/doc/extensions/pythonscript_sphinxext.py
+++ b/doc/extensions/pythonscript_sphinxext.py
@@ -4,9 +4,9 @@
 import shutil
 import subprocess
 import uuid
+import errno
 from sphinx.util.compat import Directive
 from docutils import nodes
-from notebook_sphinxext import make_image_dir
 
 
 class PythonScriptDirective(Directive):
@@ -82,3 +82,20 @@
     shutil.move(filename, image_dir + os.path.sep + my_uuid + filename)
     relative_filename = image_rel_dir + os.path.sep + my_uuid + filename
     return '<img src="%s" width="600"><br>' % relative_filename
+
+
+def make_image_dir(setup, rst_dir):
+    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
+    rel_dir = os.path.relpath(setup.confdir, rst_dir)
+    image_rel_dir = rel_dir + os.path.sep + '_images'
+    thread_safe_mkdir(image_dir)
+    return image_dir, image_rel_dir
+
+
+def thread_safe_mkdir(dirname):
+    try:
+        os.makedirs(dirname)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+        pass

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -23,7 +23,8 @@
 DEST_SUFFIX="yt-conda"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=0 # Do we do a source install of yt?
+INST_YT_SOURCE=1 # Do we do a source install of yt?
+INST_UNSTRUCTURED=1 # Do we want to build with unstructured mesh support?
 
 ##################################################################
 #                                                                #
@@ -40,6 +41,25 @@
 MINICONDA_VERSION="latest"
 YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
 
+if [ $INST_UNSTRUCTURED -eq 1 ]
+then
+  if [ $INST_YT_SOURCE -eq 0 ]
+  then
+      echo "yt must be compiled from source to use the unstructured mesh support."
+      echo "Please set INST_YT_SOURCE to 1 and re-run."
+      exit 1
+  fi
+  if [ `uname` = "Darwin" ]
+  then
+      EMBREE="embree-2.8.0.x86_64.macosx"
+      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
+  else
+      EMBREE="embree-2.8.0.x86_64.linux"
+      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
+  fi
+  PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
+fi
+
 function do_exit
 {
     echo "********************************************"
@@ -276,6 +296,11 @@
 YT_DEPS+=('mercurial')
 YT_DEPS+=('sympy')
 
+if [ $INST_UNSTRUCTURED -eq 1 ]
+then
+  YT_DEPS+=('netcdf4')   
+fi
+
 # Here is our dependency list for yt
 log_cmd conda update --yes conda
 
@@ -285,6 +310,32 @@
     log_cmd conda install --yes ${YT_DEP}
 done
 
+if [ $INST_UNSTRUCTURED -eq 1 ]
+then
+
+  echo "Installing embree"
+  mkdir ${DEST_DIR}/src
+  cd ${DEST_DIR}/src
+  ( ${GETFILE} "$EMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+  log_cmd tar xfz ${EMBREE}.tar.gz
+  log_cmd mv ${DEST_DIR}/src/${EMBREE}/include/embree2 ${DEST_DIR}/include
+  log_cmd mv ${DEST_DIR}/src/${EMBREE}/lib/lib*.* ${DEST_DIR}/lib
+  if [ `uname` = "Darwin" ]
+  then
+    ln -s ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.dylib
+    install_name_tool -id ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.2.dylib
+  else
+    ln -s ${DEST_DIR}/lib/libembree.so.2 ${DEST_DIR}/lib/libembree.so
+  fi
+
+  echo "Installing pyembree from source"
+  ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+  log_cmd unzip ${DEST_DIR}/src/master.zip
+  pushd ${DEST_DIR}/src/pyembree-master
+  log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+  popd
+fi
+
 if [ $INST_YT_SOURCE -eq 0 ]
 then
   echo "Installing yt"
@@ -294,6 +345,10 @@
     echo "Installing yt from source"
     YT_DIR="${DEST_DIR}/src/yt-hg"
     log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+if [ $INST_UNSTRUCTURED -eq 1 ]
+then
+    echo $DEST_DIR > ${YT_DIR}/embree.cfg
+fi
     pushd ${YT_DIR}
     log_cmd python setup.py develop
     popd
@@ -310,7 +365,7 @@
 echo "   $DEST_DIR/bin"
 echo
 echo "On Bash-style shells you can copy/paste the following command to "
-echo "temporarily activate the yt installtion:"
+echo "temporarily activate the yt installation:"
 echo
 echo "    export PATH=$DEST_DIR/bin:\$PATH"
 echo

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -440,10 +440,6 @@
 get_willwont ${INST_SCIPY}
 echo "be installing scipy"
 
-printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
-get_willwont ${INST_0MQ}
-echo "be installing ZeroMQ"
-
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
 get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
@@ -627,7 +623,6 @@
 FREETYPE_VER='freetype-2.4.12' 
 H5PY='h5py-2.5.0'
 HDF5='hdf5-1.8.14' 
-IPYTHON='ipython-2.4.1'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.4.3'
@@ -635,13 +630,10 @@
 NOSE='nose-1.3.6'
 NUMPY='numpy-1.9.2'
 PYTHON_HGLIB='python-hglib-1.6'
-PYZMQ='pyzmq-14.5.0'
 ROCKSTAR='rockstar-0.99.6'
 SCIPY='scipy-0.15.1'
 SQLITE='sqlite-autoconf-3071700'
 SYMPY='sympy-0.7.6'
-TORNADO='tornado-4.0.2'
-ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
 SETUPTOOLS='setuptools-18.0.1'
 
@@ -655,7 +647,6 @@
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '4a83f9ae1855a7fad90133b327d426201c8ccfd2e7fbe9f39b2d61a2eee2f3ebe2ea02cf80f3d4e1ad659f8e790c173df8cc99b87d0b7ce63d34aa88cfdc7939  h5py-2.5.0.tar.gz' > h5py-2.5.0.tar.gz.sha512
 echo '4073fba510ccadaba41db0939f909613c9cb52ba8fb6c1062fc9118edc601394c75e102310be1af4077d07c9b327e6bbb1a6359939a7268dc140382d0c1e0199  hdf5-1.8.14.tar.gz' > hdf5-1.8.14.tar.gz.sha512
-echo 'a9cffc08ba10c47b0371b05664e55eee0562a30ef0d4bbafae79e52e5b9727906c45840c0918122c06c5672ac65e6eb381399f103e1a836aca003eda81b2acde  ipython-2.4.1.tar.gz' > ipython-2.4.1.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '51b0f58b2618b47b653e17e4f6b6a1215d3a3b0f1331ce3555cc7435e365d9c75693f289ce12fe3bf8f69fd57b663e545f0f1c2c94e81eaa661cac0689e125f5  matplotlib-1.4.3.tar.gz' > matplotlib-1.4.3.tar.gz.sha512
@@ -663,12 +654,9 @@
 echo 'd0cede08dc33a8ac0af0f18063e57f31b615f06e911edb5ca264575174d8f4adb4338448968c403811d9dcc60f38ade3164662d6c7b69b499f56f0984bb6283c  nose-1.3.6.tar.gz' > nose-1.3.6.tar.gz.sha512
 echo '70470ebb9afef5dfd0c83ceb7a9d5f1b7a072b1a9b54b04f04f5ed50fbaedd5b4906bd500472268d478f94df9e749a88698b1ff30f2d80258e7f3fec040617d9  numpy-1.9.2.tar.gz' > numpy-1.9.2.tar.gz.sha512
 echo 'bfd10455e74e30df568c4c4827140fb6cc29893b0e062ce1764bd52852ec7487a70a0f5ea53c3fca7886f5d36365c9f4db52b8c93cad35fb67beeb44a2d56f2d  python-hglib-1.6.tar.gz' > python-hglib-1.6.tar.gz.sha512
-echo '20164f7b05c308e0f089c07fc46b1c522094f3ac136f2e0bba84f19cb63dfd36152a2465df723dd4d93c6fbd2de4f0d94c160e2bbc353a92cfd680eb03cbdc87  pyzmq-14.5.0.tar.gz' > pyzmq-14.5.0.tar.gz.sha512
 echo 'fff4412d850c431a1b4e6ee3b17958ee5ab3beb81e6cb8a8e7d56d368751eaa8781d7c3e69d932dc002d718fddc66a72098acfe74cfe29ec80b24e6736317275  scipy-0.15.1.tar.gz' > scipy-0.15.1.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo 'ce0f1a17ac01eb48aec31fc0ad431d9d7ed9907f0e8584a6d79d0ffe6864fe62e203fe3f2a3c3e4e3d485809750ce07507a6488e776a388a7a9a713110882fcf  sympy-0.7.6.tar.gz' > sympy-0.7.6.tar.gz.sha512
-echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
-echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 echo '9b318ce2ee2cf787929dcb886d76c492b433e71024fda9452d8b4927652a298d6bd1bdb7a4c73883a98e100024f89b46ea8aa14b250f896e549e6dd7e10a6b41  setuptools-18.0.1.tar.gz' > setuptools-18.0.1.tar.gz.sha512
 # Individual processes
@@ -679,9 +667,6 @@
 [ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
 [ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
@@ -690,7 +675,6 @@
 get_ytproject $PYTHON2.tgz
 get_ytproject $NUMPY.tar.gz
 get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject $NOSE.tar.gz
@@ -976,25 +960,9 @@
 [ -n "${OLD_CXXFLAGS}" ] && export CXXFLAGS=${OLD_CXXFLAGS}
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
 
-# Now we do our IPython installation, which has two optional dependencies.
-if [ $INST_0MQ -eq 1 ]
-then
-    if [ ! -e $ZEROMQ/done ]
-    then
-        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
-        echo "Installing ZeroMQ"
-        cd $ZEROMQ
-        ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
-        touch done
-        cd ..
-    fi
-    do_setup_py $PYZMQ --zmq=${DEST_DIR}
-    do_setup_py $TORNADO
-fi
+echo "Installing Jupyter"
+( ${DEST_DIR}/bin/pip install "jupyter<2.0.0" 2>&1 ) 1>> ${LOG_FILE}
 
-do_setup_py $IPYTHON
 do_setup_py $CYTHON
 do_setup_py $H5PY
 do_setup_py $NOSE

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -112,5 +112,5 @@
 
 
 .navbar-form.navbar-right:last-child {
-    margin-right: -20px;
+    margin-right: -60px;
 }

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/source/analyzing/_static/axes_calculator.pyx
--- a/doc/source/analyzing/_static/axes_calculator.pyx
+++ b/doc/source/analyzing/_static/axes_calculator.pyx
@@ -1,7 +1,7 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from stdlib cimport malloc, free
+from libc.stdlib cimport malloc, free
 
 cdef extern from "axes.h":
     ctypedef struct ParticleCollection:
@@ -16,7 +16,9 @@
 def examine_axes(np.ndarray[np.float64_t, ndim=1] xpos,
                  np.ndarray[np.float64_t, ndim=1] ypos,
                  np.ndarray[np.float64_t, ndim=1] zpos):
-    cdef double ax1[3], ax2[3], ax3[3]
+    cdef double ax1[3]
+    cdef double ax2[3]
+    cdef double ax3[3]
     cdef ParticleCollection particles
     cdef int i
 

diff -r df2f4ef0f1dedbb24f6ff83b3a5c5ce333ef85d9 -r d2d293eb7a253cd3e660bd216222f6596f70d754 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,423 +1,455 @@
 {
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.config import ytcfg\n",
+    "\n",
+    "import yt\n",
+    "import numpy as np\n",
+    "from yt.analysis_modules.ppv_cube.api import PPVCube\n",
+    "import yt.units as u"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Density: $\\rho(r) \\propto r^{\\alpha}$"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "where for simplicity we won't worry about the normalizations of these profiles. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, we'll set up the grid and the parameters of the profiles:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# increasing the resolution will make the images in this notebook more visually appealing\n",
+    "nx,ny,nz = (64, 64, 64) # domain dimensions\n",
+    "R = 10. # outer radius of disk, kpc\n",
+    "r_0 = 3. # scale radius, kpc\n",
+    "beta = 1.4 # for the tangential velocity profile\n",
+    "alpha = -1. # for the radial density profile\n",
+    "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+    "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+    "theta = np.arctan2(y, x) # polar coordinates"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Second, we'll construct the data arrays for the density, temperature, and velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "dens = np.zeros((nx,ny,nz))\n",
+    "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
+    "temp = np.zeros((nx,ny,nz))\n",
+    "temp[:,:,nz/2-3:nz/2+3] = 1.0e5 # Isothermal\n",
+    "vel_theta = 100.*r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
+    "velx = np.zeros((nx,ny,nz))\n",
+    "vely = np.zeros((nx,ny,nz))\n",
+    "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+    "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+    "dens[r > R] = 0.0\n",
+    "temp[r > R] = 0.0\n",
+    "velx[r > R] = 0.0\n",
+    "vely[r > R] = 0.0"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "data = {}\n",
+    "data[\"density\"] = (dens,\"g/cm**3\")\n",
+    "data[\"temperature\"] = (temp, \"K\")\n",
+    "data[\"velocity_x\"] = (velx, \"km/s\")\n",
+    "data[\"velocity_y\"] = (vely, \"km/s\")\n",
+    "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
+    "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
+    "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "slc.set_log(\"velocity_x\", False)\n",
+    "slc.set_log(\"velocity_y\", False)\n",
+    "slc.set_log(\"velocity_magnitude\", False)\n",
+    "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the x-axis. We'll create a normal vector:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "i = 60.*np.pi/180.\n",
+    "L = [np.sin(i),0.0,np.cos(i)]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to choose the bounds in line-of-sight velocity that the data will be binned into, which is a 4-tuple in the shape of `(vmin, vmax, nbins, units)`, which specifies a linear range of `nbins` velocity bins from `vmin` to `vmax` in units of `units`. We may also optionally specify the dimensions of the data cube with the `dims` argument."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "cube = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, method=\"sum\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Following this, we can now write this cube to a FITS file. The x and y axes of the file can be in length units, which can be optionally specified by `length_unit`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "cube.write_fits(\"cube.fits\", clobber=True, length_unit=\"kpc\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Or one can use the `sky_scale` and `sky_center` keywords to set up the coordinates in RA and Dec:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sky_scale = (1.0, \"arcsec/kpc\")\n",
+    "sky_center = (30., 45.) # RA, Dec in degrees\n",
+    "cube.write_fits(\"cube_sky.fits\", clobber=True, sky_scale=sky_scale, sky_center=sky_center)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_cube = yt.load(\"cube.fits\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Specifying no center gives us the center slice\n",
+    "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"])\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Picking different velocities for the slices\n",
+    "new_center = ds_cube.domain_center\n",
+    "new_center[2] = ds_cube.spec2pixel(-100.*u.km/u.s)\n",
+    "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "new_center[2] = ds_cube.spec2pixel(70.0*u.km/u.s)\n",
+    "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "new_center[2] = ds_cube.spec2pixel(-30.0*u.km/u.s)\n",
+    "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "prj = yt.ProjectionPlot(ds_cube, \"z\", [\"density\"], method=\"sum\")\n",
+    "prj.set_log(\"density\", True)\n",
+    "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+    "prj.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `thermal_broad` keyword allows one to simulate thermal line broadening based on the temperature, and the `atomic_weight` argument is used to specify the atomic weight of the particle that is doing the emitting."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "cube2 = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, thermal_broad=True, \n",
+    "                atomic_weight=12.0, method=\"sum\")\n",
+    "cube2.write_fits(\"cube2.fits\", clobber=True, length_unit=\"kpc\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Taking a slice of this cube shows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_cube2 = yt.load(\"cube2.fits\")\n",
+    "new_center = ds_cube2.domain_center\n",
+    "new_center[2] = ds_cube2.spec2pixel(70.0*u.km/u.s)\n",
+    "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "new_center[2] = ds_cube2.spec2pixel(-100.*u.km/u.s)\n",
+    "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "where we can see the emission has been smeared into this velocity slice from neighboring slices due to the thermal broadening. \n",
+    "\n",
+    "Finally, the \"velocity\" or \"spectral\" axis of the cube can be changed to a different unit, such as wavelength, frequency, or energy: "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (cube2.vbins[0], cube2.vbins[-1])\n",
+    "cube2.transform_spectral_axis(400.0,\"nm\")\n",
+    "print (cube2.vbins[0], cube2.vbins[-1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If a FITS file is now written from the cube, the spectral axis will be in the new units. To reset the spectral axis back to the original velocity units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "cube2.reset_spectral_axis()\n",
+    "print (cube2.vbins[0], cube2.vbins[-1])"
+   ]
+  }
+ ],
  "metadata": {
-  "name": "",
-  "signature": "sha256:67e4297cbc32716b2481c71659305687cb5bdadad648a0acf6b48960267bb069"
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
+  }
  },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.config import ytcfg\n",
-      "ytcfg[\"yt\",\"loglevel\"] = 30\n",
-      "\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt.analysis_modules.ppv_cube.api import PPVCube\n",
-      "import yt.units as u"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Density: $\\rho(r) \\propto r^{\\alpha}$"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "where for simplicity we won't worry about the normalizations of these profiles. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "First, we'll set up the grid and the parameters of the profiles:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "nx,ny,nz = (256,256,256) # domain dimensions\n",
-      "R = 10. # outer radius of disk, kpc\n",
-      "r_0 = 3. # scale radius, kpc\n",
-      "beta = 1.4 # for the tangential velocity profile\n",
-      "alpha = -1. # for the radial density profile\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Second, we'll construct the data arrays for the density, temperature, and velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "dens = np.zeros((nx,ny,nz))\n",
-      "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
-      "temp = np.zeros((nx,ny,nz))\n",
-      "temp[:,:,nz/2-3:nz/2+3] = 1.0e5 # Isothermal\n",
-      "vel_theta = 100.*r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
-      "velx = np.zeros((nx,ny,nz))\n",
-      "vely = np.zeros((nx,ny,nz))\n",
-      "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
-      "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
-      "dens[r > R] = 0.0\n",
-      "temp[r > R] = 0.0\n",
-      "velx[r > R] = 0.0\n",
-      "vely[r > R] = 0.0"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "data = {}\n",
-      "data[\"density\"] = (dens,\"g/cm**3\")\n",
-      "data[\"temperature\"] = (temp, \"K\")\n",
-      "data[\"velocity_x\"] = (velx, \"km/s\")\n",
-      "data[\"velocity_y\"] = (vely, \"km/s\")\n",
-      "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
-      "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc.set_log(\"velocity_x\", False)\n",
-      "slc.set_log(\"velocity_y\", False)\n",
-      "slc.set_log(\"velocity_magnitude\", False)\n",
-      "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the x-axis. We'll create a normal vector:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "i = 60.*np.pi/180.\n",
-      "L = [np.sin(i),0.0,np.cos(i)]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to choose the bounds in line-of-sight velocity that the data will be binned into, which is a 4-tuple in the shape of `(vmin, vmax, nbins, units)`, which specifies a linear range of `nbins` velocity bins from `vmin` to `vmax` in units of `units`. We may also optionally specify the dimensions of the data cube with the `dims` argument."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cube = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, method=\"sum\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Following this, we can now write this cube to a FITS file. The x and y axes of the file can be in length units, which can be optionally specified by `length_unit`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cube.write_fits(\"cube.fits\", clobber=True, length_unit=\"kpc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Or one can use the `sky_scale` and `sky_center` keywords to set up the coordinates in RA and Dec:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sky_scale = (1.0, \"arcsec/kpc\")\n",
-      "sky_center = (30., 45.) # RA, Dec in degrees\n",
-      "cube.write_fits(\"cube_sky.fits\", clobber=True, sky_scale=sky_scale, sky_center=sky_center)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds_cube = yt.load(\"cube.fits\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Specifying no center gives us the center slice\n",
-      "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"])\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Picking different velocities for the slices\n",
-      "new_center = ds_cube.domain_center\n",
-      "new_center[2] = ds_cube.spec2pixel(-100.*u.km/u.s)\n",
-      "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center[2] = ds_cube.spec2pixel(70.0*u.km/u.s)\n",
-      "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center[2] = ds_cube.spec2pixel(-30.0*u.km/u.s)\n",
-      "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj = yt.ProjectionPlot(ds_cube, \"z\", [\"density\"], method=\"sum\")\n",
-      "prj.set_log(\"density\", True)\n",
-      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
-      "prj.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `thermal_broad` keyword allows one to simulate thermal line broadening based on the temperature, and the `atomic_weight` argument is used to specify the atomic weight of the particle that is doing the emitting."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cube2 = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, thermal_broad=True, \n",
-      "                atomic_weight=12.0, method=\"sum\")\n",
-      "cube2.write_fits(\"cube2.fits\", clobber=True, length_unit=\"kpc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Taking a slice of this cube shows:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds_cube2 = yt.load(\"cube2.fits\")\n",
-      "new_center = ds_cube2.domain_center\n",
-      "new_center[2] = ds_cube2.spec2pixel(70.0*u.km/u.s)\n",
-      "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center[2] = ds_cube2.spec2pixel(-100.*u.km/u.s)\n",
-      "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "where we can see the emission has been smeared into this velocity slice from neighboring slices due to the thermal broadening. \n",
-      "\n",
-      "Finally, the \"velocity\" or \"spectral\" axis of the cube can be changed to a different unit, such as wavelength, frequency, or energy: "
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print cube2.vbins[0], cube2.vbins[-1]\n",
-      "cube2.transform_spectral_axis(400.0,\"nm\")\n",
-      "print cube2.vbins[0], cube2.vbins[-1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If a FITS file is now written from the cube, the spectral axis will be in the new units. To reset the spectral axis back to the original velocity units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cube2.reset_spectral_axis()\n",
-      "print cube2.vbins[0], cube2.vbins[-1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/ecf1c74a3360/
Changeset:   ecf1c74a3360
Branch:      yt
User:        jzuhone
Date:        2016-01-29 15:14:55+00:00
Summary:     Putting the underscore back here
Affected #:  2 files

diff -r d2d293eb7a253cd3e660bd216222f6596f70d754 -r ecf1c74a33607c2b14eee4159e1f4278a103231b yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -402,7 +402,7 @@
         """
         Create and return dimensionally-equivalent units in a specified base.
         """
-        yt_base_unit_string = get_system_unit_string(self.dimensions, default_base_units)
+        yt_base_unit_string = _get_system_unit_string(self.dimensions, default_base_units)
         yt_base_unit = Unit(yt_base_unit_string, base_value=1.0,
                             dimensions=self.dimensions, registry=self.registry)
         if unit_system == "cgs":
@@ -415,7 +415,7 @@
                                    r'code unit system. Try again with unit_system=ds instead, '
                                    r'where \'ds\' is your dataset.')
             unit_system = unit_system_registry[str(unit_system)]
-            units_string = get_system_unit_string(self.dimensions, unit_system.base_units)
+            units_string = _get_system_unit_string(self.dimensions, unit_system.base_units)
             u = Unit(units_string, registry=self.registry)
             base_value = get_conversion_factor(self, yt_base_unit)[0]
             base_value /= get_conversion_factor(self, u)[0]
@@ -611,7 +611,7 @@
     elif not isinstance(dimensions, Basic):
         raise UnitParseError("Bad dimensionality expression '%s'." % dimensions)
 
-def get_system_unit_string(dimensions, base_units):
+def _get_system_unit_string(dimensions, base_units):
     # The dimensions of a unit object is the product of the base dimensions.
     # Use sympy to factor the dimensions into base CGS unit symbols.
     units = []

diff -r d2d293eb7a253cd3e660bd216222f6596f70d754 -r ecf1c74a33607c2b14eee4159e1f4278a103231b yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -13,7 +13,7 @@
 
 from yt.extern.six import string_types
 from yt.units import dimensions
-from yt.units.unit_object import Unit, unit_system_registry, get_system_unit_string
+from yt.units.unit_object import Unit, unit_system_registry, _get_system_unit_string
 from yt.utilities import physical_constants as pc
 
 class UnitSystemConstants(object):
@@ -59,7 +59,7 @@
                 self._dims.append(key)
             key = getattr(dimensions, key)
         if key not in self.units_map:
-            units = get_system_unit_string(key, self.units_map)
+            units = _get_system_unit_string(key, self.units_map)
             self.units_map[key] = Unit(units, registry=self.registry)
         return self.units_map[key]
 


https://bitbucket.org/yt_analysis/yt/commits/5cf12f8754e0/
Changeset:   5cf12f8754e0
Branch:      yt
User:        jzuhone
Date:        2016-01-29 15:20:04+00:00
Summary:     Small nits here and there
Affected #:  3 files

diff -r ecf1c74a33607c2b14eee4159e1f4278a103231b -r 5cf12f8754e09a804e7650a61dbd2582476c885e yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -293,10 +293,10 @@
                 nice_name, tex_label = _nice_species_name(field)
                 display_name = r'\dot{\omega}\left[%s\right]' % tex_label
                 # Overwrite field to use nicer tex_label'ed display_name
-                self.add_output_field(("boxlib", field), units=unit_system["rate"],
+                self.add_output_field(("boxlib", field), units=unit_system["frequency"],
                                       display_name=display_name)
                 self.alias(("gas", "%s_creation_rate" % nice_name),
-                           ("boxlib", field), units=unit_system["rate"])
+                           ("boxlib", field), units=unit_system["frequency"])
 
 
 def _nice_species_name(field):

diff -r ecf1c74a33607c2b14eee4159e1f4278a103231b -r 5cf12f8754e09a804e7650a61dbd2582476c885e yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -40,26 +40,26 @@
                    "velocity_magnitude"]
 
     test_units = {}
-    test_units["mks"] = {"density":"kg/m**3",
-                         "kinetic_energy":"Pa",
-                         "velocity_magnitude":"m/s",
-                         "velocity_divergence":"1/s",
-                         "density_gradient_x":"kg/m**4"}
-    test_units["imperial"] = {"density":"lbm/ft**3",
-                              "kinetic_energy":"lbf/ft**2",
-                              "velocity_magnitude":"ft/s",
-                              "velocity_divergence":"1/s",
-                              "density_gradient_x":"lbm/ft**4"}
-    test_units["galactic"] = {"density":"Msun/kpc**3",
-                              "kinetic_energy":"Msun/(Myr**2*kpc)",
-                              "velocity_magnitude":"kpc/Myr",
-                              "velocity_divergence":"1/Myr",
-                              "density_gradient_x":"Msun/kpc**4"}
-    test_units["code"] = {"density":"code_mass/code_length**3",
-                          "kinetic_energy":"code_mass/(code_length*code_time**2)",
-                          "velocity_magnitude":"code_velocity",
-                          "velocity_divergence":"code_velocity/code_length",
-                          "density_gradient_x":"code_mass/code_length**4"}
+    test_units["mks"] = {"density": "kg/m**3",
+                         "kinetic_energy": "Pa",
+                         "velocity_magnitude": "m/s",
+                         "velocity_divergence": "1/s",
+                         "density_gradient_x": "kg/m**4"}
+    test_units["imperial"] = {"density": "lbm/ft**3",
+                              "kinetic_energy": "lbf/ft**2",
+                              "velocity_magnitude": "ft/s",
+                              "velocity_divergence": "1/s",
+                              "density_gradient_x": "lbm/ft**4"}
+    test_units["galactic"] = {"density": "Msun/kpc**3",
+                              "kinetic_energy": "Msun/(Myr**2*kpc)",
+                              "velocity_magnitude": "kpc/Myr",
+                              "velocity_divergence": "1/Myr",
+                              "density_gradient_x": "Msun/kpc**4"}
+    test_units["code"] = {"density": "code_mass/code_length**3",
+                          "kinetic_energy": "code_mass/(code_length*code_time**2)",
+                          "velocity_magnitude": "code_velocity",
+                          "velocity_divergence": "code_velocity/code_length",
+                          "density_gradient_x": "code_mass/code_length**4"}
 
     ds_cgs = load(gslr)
     dd_cgs = ds_cgs.sphere("c", (100., "kpc"))

diff -r ecf1c74a33607c2b14eee4159e1f4278a103231b -r 5cf12f8754e09a804e7650a61dbd2582476c885e yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -49,7 +49,7 @@
     omega_curvature : the fraction of the energy density of the Universe in 
         curvature.
         Default: 0.0.
-    unit_system : UnitSystem, optional 
+    unit_system : :class:`yt.units.unit_systems.UnitSystem`, optional
         The units system to use when making calculations. If not specified,
         cgs units are assumed.
 


https://bitbucket.org/yt_analysis/yt/commits/ea31fe3a8bb1/
Changeset:   ea31fe3a8bb1
Branch:      yt
User:        jzuhone
Date:        2016-01-29 15:22:12+00:00
Summary:     Move this import to the top
Affected #:  1 file

diff -r 5cf12f8754e09a804e7650a61dbd2582476c885e -r ea31fe3a8bb13d249f8a7ff23b3d13e015e52b76 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -15,6 +15,8 @@
 
 import numpy as np
 
+from yt.units.unit_object import Unit
+
 from .derived_field import \
     ValidateSpatial
 
@@ -174,7 +176,6 @@
                           weight="cell_mass")
 
 def setup_gradient_fields(registry, grad_field, field_units, slice_info = None):
-    from yt.units.unit_object import Unit
     assert(isinstance(grad_field, tuple))
     ftype, fname = grad_field
     if slice_info is None:


https://bitbucket.org/yt_analysis/yt/commits/09da6be5eca3/
Changeset:   09da6be5eca3
Branch:      yt
User:        jzuhone
Date:        2016-01-29 16:54:19+00:00
Summary:     Test another dataset here
Affected #:  1 file

diff -r ea31fe3a8bb13d249f8a7ff23b3d13e015e52b76 -r 09da6be5eca3d7e918cbfff52a3aaf3a667ad75e yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -29,37 +29,38 @@
     assert goofy_unit_system["magnetic_field_mks"] == Unit("lbm/(hr**2*mA)")
     assert "goofy" in unit_system_registry
 
+test_units = {}
+test_units["mks"] = {"density": "kg/m**3",
+                     "kinetic_energy": "Pa",
+                     "velocity_magnitude": "m/s",
+                     "velocity_divergence": "1/s",
+                     "density_gradient_x": "kg/m**4"}
+test_units["imperial"] = {"density": "lbm/ft**3",
+                          "kinetic_energy": "lbf/ft**2",
+                          "velocity_magnitude": "ft/s",
+                          "velocity_divergence": "1/s",
+                          "density_gradient_x": "lbm/ft**4"}
+test_units["galactic"] = {"density": "Msun/kpc**3",
+                          "kinetic_energy": "Msun/(Myr**2*kpc)",
+                          "velocity_magnitude": "kpc/Myr",
+                          "velocity_divergence": "1/Myr",
+                          "density_gradient_x": "Msun/kpc**4"}
+test_units["code"] = {"density": "code_mass/code_length**3",
+                      "kinetic_energy": "code_mass/(code_length*code_time**2)",
+                      "velocity_magnitude": "code_velocity",
+                      "velocity_divergence": "code_velocity/code_length",
+                      "density_gradient_x": "code_mass/code_length**4"}
+
+test_fields = ["density",
+               "kinetic_energy",
+               "velocity_divergence",
+               "density_gradient_x",
+               "velocity_magnitude"]
+
 gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
 @requires_file(gslr)
-def test_fields_diff_systems():
+def test_fields_diff_systems_sloshing():
     ytcfg["yt","skip_dataset_cache"] = "True"
-    test_fields = ["density",
-                   "kinetic_energy",
-                   "velocity_divergence",
-                   "density_gradient_x",
-                   "velocity_magnitude"]
-
-    test_units = {}
-    test_units["mks"] = {"density": "kg/m**3",
-                         "kinetic_energy": "Pa",
-                         "velocity_magnitude": "m/s",
-                         "velocity_divergence": "1/s",
-                         "density_gradient_x": "kg/m**4"}
-    test_units["imperial"] = {"density": "lbm/ft**3",
-                              "kinetic_energy": "lbf/ft**2",
-                              "velocity_magnitude": "ft/s",
-                              "velocity_divergence": "1/s",
-                              "density_gradient_x": "lbm/ft**4"}
-    test_units["galactic"] = {"density": "Msun/kpc**3",
-                              "kinetic_energy": "Msun/(Myr**2*kpc)",
-                              "velocity_magnitude": "kpc/Myr",
-                              "velocity_divergence": "1/Myr",
-                              "density_gradient_x": "Msun/kpc**4"}
-    test_units["code"] = {"density": "code_mass/code_length**3",
-                          "kinetic_energy": "code_mass/(code_length*code_time**2)",
-                          "velocity_magnitude": "code_velocity",
-                          "velocity_divergence": "code_velocity/code_length",
-                          "density_gradient_x": "code_mass/code_length**4"}
 
     ds_cgs = load(gslr)
     dd_cgs = ds_cgs.sphere("c", (100., "kpc"))
@@ -75,4 +76,24 @@
                 v1 = dd_cgs[field].in_base(us)
             v2 = dd[field]
             assert_almost_equal(v1.v, v2.v)
+            assert str(v2.units) == test_units[us][field]
+
+etc = "enzo_tiny_cosmology/DD0046/DD0046"
+ at requires_file(etc)
+def test_fields_diff_systems_etc():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds_cgs = load(etc)
+    dd_cgs = ds_cgs.sphere("max", (500., "kpc"))
+
+    for us in test_units:
+        ds = load(etc, unit_system=us)
+        dd = ds.sphere("max", (500.,"kpc"))
+        for field in test_fields:
+            if us == "code":
+                v1 = dd_cgs[field].in_units(test_units["code"][field])
+            else:
+                v1 = dd_cgs[field].in_base(us)
+            v2 = dd[field]
+            assert_almost_equal(v1.v, v2.v)
             assert str(v2.units) == test_units[us][field]
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/6fb5bb5d9304/
Changeset:   6fb5bb5d9304
Branch:      yt
User:        jzuhone
Date:        2016-01-29 17:05:13+00:00
Summary:     Docstring here to explain how this works
Affected #:  1 file

diff -r 09da6be5eca3d7e918cbfff52a3aaf3a667ad75e -r 6fb5bb5d9304ed6f9f8bd442f77969c1459bbd0a yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -115,6 +115,31 @@
                        units="dimensionless")
 
 def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"):
+    r"""
+    This routine sets up special aliases between dataset-specific magnetic fields
+    and the default magnetic fields in yt so that unit conversions between different
+    unit systems can be handled properly. This is only called from the `setup_fluid_fields`
+    method of a frontend's :class:`FieldInfoContainer` instance.
+
+    Parameters
+    ----------
+    registry : :class:`FieldInfoContainer`
+        The field registry that these definitions will be installed into.
+    ds_ftype : string
+        The field type for the fields we're going to alias, e.g. "flash", "enzo", "athena", etc.
+    ds_fields : list of strings
+        The fields that will be aliased.
+    ftype : string, optional
+        The resulting field type of the fields. Default "gas".
+
+    Examples
+    --------
+    >>> class PlutoFieldInfo(ChomboFieldInfo):
+    ...     def setup_fluid_fields(self):
+    ...         from yt.fields.magnetic_field import \
+    ...             setup_magnetic_field_aliases
+    ...         setup_magnetic_field_aliases(self, "chombo", ["bx%s" % ax for ax in [1,2,3]])
+    """
     unit_system = registry.ds.unit_system
     ds_fields = [(ds_ftype, fd) for fd in ds_fields]
     if ds_fields[0] not in registry:


https://bitbucket.org/yt_analysis/yt/commits/8b7e6e1c645b/
Changeset:   8b7e6e1c645b
Branch:      yt
User:        jzuhone
Date:        2016-01-29 17:11:48+00:00
Summary:     Some explanation on how to define field units with arithmetic
Affected #:  1 file

diff -r 6fb5bb5d9304ed6f9f8bd442f77969c1459bbd0a -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -146,8 +146,17 @@
 
     ds.add_field(("gas", "pressure"), function=_pressure, units=ds.unit_system["pressure"])
 
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
+Since the :class:`yt.units.unit_systems.UnitSystem` object returns a :class:`yt.units.unit_object.Unit` object when
+queried, you're not limited to specifying units in terms of those already available. You can specify units for fields
+using basic arithmetic if necessary:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "my_acceleration"), function=_my_acceleration,
+                 units=ds.unit_system["length"]/ds.unit_system["time"]**2)
+
+If you find yourself using the same custom-defined fields over and over, you should put them in your plugins file as
+described in :ref:`plugin-file`.
 
 A More Complicated Example
 --------------------------


https://bitbucket.org/yt_analysis/yt/commits/7e69492b6a4d/
Changeset:   7e69492b6a4d
Branch:      yt
User:        jzuhone
Date:        2016-02-04 15:53:27+00:00
Summary:     Merge from upstream
Affected #:  146 files

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -28,40 +28,37 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
-yt/utilities/lib/CICDeposit.c
-yt/utilities/lib/ContourFinding.c
-yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c
-yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
 yt/utilities/lib/image_utilities.c
-yt/utilities/lib/Interpolators.c
+yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
 yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
-yt/utilities/lib/Octree.c
-yt/utilities/lib/GridTree.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
-yt/utilities/lib/PointsInVolume.c
-yt/utilities/lib/QuadTree.c
-yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
-yt/utilities/lib/element_mappings.c
-yt/utilities/lib/mesh_construction.cpp
-yt/utilities/lib/mesh_samplers.cpp
-yt/utilities/lib/mesh_traversal.cpp
-yt/utilities/lib/mesh_intersection.cpp
 syntax: glob
 *.pyc
 .*.swp

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
-    """ A comment block.
-    """
-    is_comment = True
-    def __init__(self, start_lineno, end_lineno, text):
-        # int : The first line number in the block. 1-indexed.
-        self.start_lineno = start_lineno
-        # int : The last line number. Inclusive!
-        self.end_lineno = end_lineno
-        # str : The text block including '#' character but not any leading spaces.
-        self.text = text
-
-    def add(self, string, start, end, line):
-        """ Add a new comment line.
-        """
-        self.start_lineno = min(self.start_lineno, start[0])
-        self.end_lineno = max(self.end_lineno, end[0])
-        self.text += string
-
-    def __repr__(self):
-        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno, self.text)
-
-
-class NonComment(object):
-    """ A non-comment block of code.
-    """
-    is_comment = False
-    def __init__(self, start_lineno, end_lineno):
-        self.start_lineno = start_lineno
-        self.end_lineno = end_lineno
-
-    def add(self, string, start, end, line):
-        """ Add lines to the block.
-        """
-        if string.strip():
-            # Only add if not entirely whitespace.
-            self.start_lineno = min(self.start_lineno, start[0])
-            self.end_lineno = max(self.end_lineno, end[0])
-
-    def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno)
-
-
-class CommentBlocker(object):
-    """ Pull out contiguous comment blocks.
-    """
-    def __init__(self):
-        # Start with a dummy.
-        self.current_block = NonComment(0, 0)
-
-        # All of the blocks seen so far.
-        self.blocks = []
-
-        # The index mapping lines of code to their associated comment blocks.
-        self.index = {}
-
-    def process_file(self, file):
-        """ Process a file object.
-        """
-        for token in tokenize.generate_tokens(file.next):
-            self.process_token(*token)
-        self.make_index()
-
-    def process_token(self, kind, string, start, end, line):
-        """ Process a single token.
-        """
-        if self.current_block.is_comment:
-            if kind == tokenize.COMMENT:
-                self.current_block.add(string, start, end, line)
-            else:
-                self.new_noncomment(start[0], end[0])
-        else:
-            if kind == tokenize.COMMENT:
-                self.new_comment(string, start, end, line)
-            else:
-                self.current_block.add(string, start, end, line)
-
-    def new_noncomment(self, start_lineno, end_lineno):
-        """ We are transitioning from a noncomment to a comment.
-        """
-        block = NonComment(start_lineno, end_lineno)
-        self.blocks.append(block)
-        self.current_block = block
-
-    def new_comment(self, string, start, end, line):
-        """ Possibly add a new comment.
-        
-        Only adds a new comment if this comment is the only thing on the line.
-        Otherwise, it extends the noncomment block.
-        """
-        prefix = line[:start[1]]
-        if prefix.strip():
-            # Oops! Trailing comment, not a comment block.
-            self.current_block.add(string, start, end, line)
-        else:
-            # A comment block.
-            block = Comment(start[0], end[0], string)
-            self.blocks.append(block)
-            self.current_block = block
-
-    def make_index(self):
-        """ Make the index mapping lines of actual code to their associated
-        prefix comments.
-        """
-        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
-            if not block.is_comment:
-                self.index[block.start_lineno] = prev
-
-    def search_for_comment(self, lineno, default=None):
-        """ Find the comment block just before the given line number.
-
-        Returns None (or the specified default) if there is no such block.
-        """
-        if not self.index:
-            self.make_index()
-        block = self.index.get(lineno, None)
-        text = getattr(block, 'text', default)
-        return text
-
-
-def strip_comment_marker(text):
-    """ Strip # markers at the front of a block of comment text.
-    """
-    lines = []
-    for line in text.splitlines():
-        lines.append(line.lstrip('#'))
-    text = textwrap.dedent('\n'.join(lines))
-    return text
-
-
-def get_class_traits(klass):
-    """ Yield all of the documentation for trait definitions on a class object.
-    """
-    # FIXME: gracefully handle errors here or in the caller?
-    source = inspect.getsource(klass)
-    cb = CommentBlocker()
-    cb.process_file(StringIO(source))
-    mod_ast = compiler.parse(source)
-    class_ast = mod_ast.node.nodes[0]
-    for node in class_ast.code.nodes:
-        # FIXME: handle other kinds of assignments?
-        if isinstance(node, compiler.ast.Assign):
-            name = node.nodes[0].name
-            rhs = unparse(node.expr).strip()
-            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
-            yield name, rhs, doc
-

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
-    The unparse method takes a compiler.ast tree and transforms it back into
-    valid python code.  It is incomplete and currently only works for
-    import statements, function calls, function definitions, assignments, and
-    basic expressions.
-
-    Inspired by python-2.5-svn/Demo/parser/unparse.py
-
-    fixme: We may want to move to using _ast trees because the compiler for
-           them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
-    s = cStringIO.StringIO()
-    UnparseCompilerAst(ast, s, single_line_functions)
-    return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
-                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
-    """ Methods in this class recursively traverse an AST and
-        output source code for the abstract syntax; original formatting
-        is disregarged.
-    """
-
-    #########################################################################
-    # object interface.
-    #########################################################################
-
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
-
-            Print the source for tree to file.
-        """
-        self.f = file
-        self._single_func = single_line_functions
-        self._do_indent = True
-        self._indent = 0
-        self._dispatch(tree)
-        self._write("\n")
-        self.f.flush()
-
-    #########################################################################
-    # Unparser private interface.
-    #########################################################################
-
-    ### format, output, and dispatch methods ################################
-
-    def _fill(self, text = ""):
-        "Indent a piece of text, according to the current indentation level"
-        if self._do_indent:
-            self._write("\n"+"    "*self._indent + text)
-        else:
-            self._write(text)
-
-    def _write(self, text):
-        "Append a piece of text to the current line."
-        self.f.write(text)
-
-    def _enter(self):
-        "Print ':', and increase the indentation."
-        self._write(": ")
-        self._indent += 1
-
-    def _leave(self):
-        "Decrease the indentation level."
-        self._indent -= 1
-
-    def _dispatch(self, tree):
-        "_dispatcher function, _dispatching tree type T to method _T."
-        if isinstance(tree, list):
-            for t in tree:
-                self._dispatch(t)
-            return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
-            return
-        meth(tree)
-
-
-    #########################################################################
-    # compiler.ast unparsing methods.
-    #
-    # There should be one method per concrete grammar type. They are
-    # organized in alphabetical order.
-    #########################################################################
-
-    def _Add(self, t):
-        self.__binary_op(t, '+')
-
-    def _And(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") and (")
-        self._write(")")
-               
-    def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
-        self._dispatch(t.expr)
-        self._write('.'+t.attrname)
- 
-    def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
-
-            This only handles assignment in expressions.  Keyword assignment
-            is handled separately.
-        """
-        self._fill()
-        for target in t.nodes:
-            self._dispatch(target)
-            self._write(" = ")
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write('; ')
-
-    def _AssName(self, t):
-        """ Name on left hand side of expression.
-
-            Treat just like a name on the right side of an expression.
-        """
-        self._Name(t)
-
-    def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
-
-        # _write each elements, separated by a comma.
-        for element in t.nodes[:-1]:
-            self._dispatch(element)
-            self._write(", ")
-
-        # Handle the last one without writing comma
-        last_element = t.nodes[-1]
-        self._dispatch(last_element)
-
-    def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
-        
-        self._fill()
-        self._dispatch(t.node)
-        self._write(' '+t.op+' ')
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write(';')
-            
-    def _Bitand(self, t):
-        """ Bit and operation.
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" & ")
-                
-    def _Bitor(self, t):
-        """ Bit or operation
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" | ")
-                
-    def _CallFunc(self, t):
-        """ Function call.
-        """
-        self._dispatch(t.node)
-        self._write("(")
-        comma = False
-        for e in t.args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._dispatch(e)
-        if t.star_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("*")
-            self._dispatch(t.star_args)
-        if t.dstar_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("**")
-            self._dispatch(t.dstar_args)
-        self._write(")")
-
-    def _Compare(self, t):
-        self._dispatch(t.expr)
-        for op, expr in t.ops:
-            self._write(" " + op + " ")
-            self._dispatch(expr)
-
-    def _Const(self, t):
-        """ A constant value such as an integer value, 3, or a string, "hello".
-        """
-        self._dispatch(t.value)
-
-    def _Decorators(self, t):
-        """ Handle function decorators (eg. @has_units)
-        """
-        for node in t.nodes:
-            self._dispatch(node)
-
-    def _Dict(self, t):
-        self._write("{")
-        for  i, (k, v) in enumerate(t.items):
-            self._dispatch(k)
-            self._write(": ")
-            self._dispatch(v)
-            if i < len(t.items)-1:
-                self._write(", ")
-        self._write("}")
-
-    def _Discard(self, t):
-        """ Node for when return value is ignored such as in "foo(a)".
-        """
-        self._fill()
-        self._dispatch(t.expr)
-
-    def _Div(self, t):
-        self.__binary_op(t, '/')
-
-    def _Ellipsis(self, t):
-        self._write("...")
-
-    def _From(self, t):
-        """ Handle "from xyz import foo, bar as baz".
-        """
-        # fixme: Are From and ImportFrom handled differently?
-        self._fill("from ")
-        self._write(t.modname)
-        self._write(" import ")
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-                
-    def _Function(self, t):
-        """ Handle function definitions
-        """
-        if t.decorators is not None:
-            self._fill("@")
-            self._dispatch(t.decorators)
-        self._fill("def "+t.name + "(")
-        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
-        for i, arg in enumerate(zip(t.argnames, defaults)):
-            self._write(arg[0])
-            if arg[1] is not None:
-                self._write('=')
-                self._dispatch(arg[1])
-            if i < len(t.argnames)-1:
-                self._write(', ')
-        self._write(")")
-        if self._single_func:
-            self._do_indent = False
-        self._enter()
-        self._dispatch(t.code)
-        self._leave()
-        self._do_indent = True
-
-    def _Getattr(self, t):
-        """ Handle getting an attribute of an object
-        """
-        if isinstance(t.expr, (Div, Mul, Sub, Add)):
-            self._write('(')
-            self._dispatch(t.expr)
-            self._write(')')
-        else:
-            self._dispatch(t.expr)
-            
-        self._write('.'+t.attrname)
-        
-    def _If(self, t):
-        self._fill()
-        
-        for i, (compare,code) in enumerate(t.tests):
-            if i == 0:
-                self._write("if ")
-            else:
-                self._write("elif ")
-            self._dispatch(compare)
-            self._enter()
-            self._fill()
-            self._dispatch(code)
-            self._leave()
-            self._write("\n")
-
-        if t.else_ is not None:
-            self._write("else")
-            self._enter()
-            self._fill()
-            self._dispatch(t.else_)
-            self._leave()
-            self._write("\n")
-            
-    def _IfExp(self, t):
-        self._dispatch(t.then)
-        self._write(" if ")
-        self._dispatch(t.test)
-
-        if t.else_ is not None:
-            self._write(" else (")
-            self._dispatch(t.else_)
-            self._write(")")
-
-    def _Import(self, t):
-        """ Handle "import xyz.foo".
-        """
-        self._fill("import ")
-        
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Keyword(self, t):
-        """ Keyword value assignment within function calls and definitions.
-        """
-        self._write(t.name)
-        self._write("=")
-        self._dispatch(t.expr)
-        
-    def _List(self, t):
-        self._write("[")
-        for  i,node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i < len(t.nodes)-1:
-                self._write(", ")
-        self._write("]")
-
-    def _Module(self, t):
-        if t.doc is not None:
-            self._dispatch(t.doc)
-        self._dispatch(t.node)
-
-    def _Mul(self, t):
-        self.__binary_op(t, '*')
-
-    def _Name(self, t):
-        self._write(t.name)
-
-    def _NoneType(self, t):
-        self._write("None")
-        
-    def _Not(self, t):
-        self._write('not (')
-        self._dispatch(t.expr)
-        self._write(')')
-        
-    def _Or(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") or (")
-        self._write(")")
-                
-    def _Pass(self, t):
-        self._write("pass\n")
-
-    def _Printnl(self, t):
-        self._fill("print ")
-        if t.dest:
-            self._write(">> ")
-            self._dispatch(t.dest)
-            self._write(", ")
-        comma = False
-        for node in t.nodes:
-            if comma: self._write(', ')
-            else: comma = True
-            self._dispatch(node)
-
-    def _Power(self, t):
-        self.__binary_op(t, '**')
-
-    def _Return(self, t):
-        self._fill("return ")
-        if t.value:
-            if isinstance(t.value, Tuple):
-                text = ', '.join([ name.name for name in t.value.asList() ])
-                self._write(text)
-            else:
-                self._dispatch(t.value)
-            if not self._do_indent:
-                self._write('; ')
-
-    def _Slice(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        if t.lower:
-            self._dispatch(t.lower)
-        self._write(":")
-        if t.upper:
-            self._dispatch(t.upper)
-        #if t.step:
-        #    self._write(":")
-        #    self._dispatch(t.step)
-        self._write("]")
-
-    def _Sliceobj(self, t):
-        for i, node in enumerate(t.nodes):
-            if i != 0:
-                self._write(":")
-            if not (isinstance(node, Const) and node.value is None):
-                self._dispatch(node)
-
-    def _Stmt(self, tree):
-        for node in tree.nodes:
-            self._dispatch(node)
-
-    def _Sub(self, t):
-        self.__binary_op(t, '-')
-
-    def _Subscript(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        for i, value in enumerate(t.subs):
-            if i != 0:
-                self._write(",")
-            self._dispatch(value)
-        self._write("]")
-
-    def _TryExcept(self, t):
-        self._fill("try")
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-
-        for handler in t.handlers:
-            self._fill('except ')
-            self._dispatch(handler[0])
-            if handler[1] is not None:
-                self._write(', ')
-                self._dispatch(handler[1])
-            self._enter()
-            self._dispatch(handler[2])
-            self._leave()
-            
-        if t.else_:
-            self._fill("else")
-            self._enter()
-            self._dispatch(t.else_)
-            self._leave()
-
-    def _Tuple(self, t):
-
-        if not t.nodes:
-            # Empty tuple.
-            self._write("()")
-        else:
-            self._write("(")
-
-            # _write each elements, separated by a comma.
-            for element in t.nodes[:-1]:
-                self._dispatch(element)
-                self._write(", ")
-
-            # Handle the last one without writing comma
-            last_element = t.nodes[-1]
-            self._dispatch(last_element)
-
-            self._write(")")
-            
-    def _UnaryAdd(self, t):
-        self._write("+")
-        self._dispatch(t.expr)
-        
-    def _UnarySub(self, t):
-        self._write("-")
-        self._dispatch(t.expr)        
-
-    def _With(self, t):
-        self._fill('with ')
-        self._dispatch(t.expr)
-        if t.vars:
-            self._write(' as ')
-            self._dispatch(t.vars.name)
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-        self._write('\n')
-        
-    def _int(self, t):
-        self._write(repr(t))
-
-    def __binary_op(self, t, symbol):
-        # Check if parenthesis are needed on left side and then dispatch
-        has_paren = False
-        left_class = str(t.left.__class__)
-        if (left_class in op_precedence.keys() and
-            op_precedence[left_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.left)
-        if has_paren:
-            self._write(')')
-        # Write the appropriate symbol for operator
-        self._write(symbol)
-        # Check if parenthesis are needed on the right side and then dispatch
-        has_paren = False
-        right_class = str(t.right.__class__)
-        if (right_class in op_precedence.keys() and
-            op_precedence[right_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.right)
-        if has_paren:
-            self._write(')')
-
-    def _float(self, t):
-        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
-        # We prefer str here.
-        self._write(str(t))
-
-    def _str(self, t):
-        self._write(repr(t))
-        
-    def _tuple(self, t):
-        self._write(str(t))
-
-    #########################################################################
-    # These are the methods from the _ast modules unparse.
-    #
-    # As our needs to handle more advanced code increase, we may want to
-    # modify some of the methods below so that they work for compiler.ast.
-    #########################################################################
-
-#    # stmt
-#    def _Expr(self, tree):
-#        self._fill()
-#        self._dispatch(tree.value)
-#
-#    def _Import(self, t):
-#        self._fill("import ")
-#        first = True
-#        for a in t.names:
-#            if first:
-#                first = False
-#            else:
-#                self._write(", ")
-#            self._write(a.name)
-#            if a.asname:
-#                self._write(" as "+a.asname)
-#
-##    def _ImportFrom(self, t):
-##        self._fill("from ")
-##        self._write(t.module)
-##        self._write(" import ")
-##        for i, a in enumerate(t.names):
-##            if i == 0:
-##                self._write(", ")
-##            self._write(a.name)
-##            if a.asname:
-##                self._write(" as "+a.asname)
-##        # XXX(jpe) what is level for?
-##
-#
-#    def _Break(self, t):
-#        self._fill("break")
-#
-#    def _Continue(self, t):
-#        self._fill("continue")
-#
-#    def _Delete(self, t):
-#        self._fill("del ")
-#        self._dispatch(t.targets)
-#
-#    def _Assert(self, t):
-#        self._fill("assert ")
-#        self._dispatch(t.test)
-#        if t.msg:
-#            self._write(", ")
-#            self._dispatch(t.msg)
-#
-#    def _Exec(self, t):
-#        self._fill("exec ")
-#        self._dispatch(t.body)
-#        if t.globals:
-#            self._write(" in ")
-#            self._dispatch(t.globals)
-#        if t.locals:
-#            self._write(", ")
-#            self._dispatch(t.locals)
-#
-#    def _Print(self, t):
-#        self._fill("print ")
-#        do_comma = False
-#        if t.dest:
-#            self._write(">>")
-#            self._dispatch(t.dest)
-#            do_comma = True
-#        for e in t.values:
-#            if do_comma:self._write(", ")
-#            else:do_comma=True
-#            self._dispatch(e)
-#        if not t.nl:
-#            self._write(",")
-#
-#    def _Global(self, t):
-#        self._fill("global")
-#        for i, n in enumerate(t.names):
-#            if i != 0:
-#                self._write(",")
-#            self._write(" " + n)
-#
-#    def _Yield(self, t):
-#        self._fill("yield")
-#        if t.value:
-#            self._write(" (")
-#            self._dispatch(t.value)
-#            self._write(")")
-#
-#    def _Raise(self, t):
-#        self._fill('raise ')
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.inst:
-#            self._write(", ")
-#            self._dispatch(t.inst)
-#        if t.tback:
-#            self._write(", ")
-#            self._dispatch(t.tback)
-#
-#
-#    def _TryFinally(self, t):
-#        self._fill("try")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#        self._fill("finally")
-#        self._enter()
-#        self._dispatch(t.finalbody)
-#        self._leave()
-#
-#    def _excepthandler(self, t):
-#        self._fill("except ")
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.name:
-#            self._write(", ")
-#            self._dispatch(t.name)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _ClassDef(self, t):
-#        self._write("\n")
-#        self._fill("class "+t.name)
-#        if t.bases:
-#            self._write("(")
-#            for a in t.bases:
-#                self._dispatch(a)
-#                self._write(", ")
-#            self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _FunctionDef(self, t):
-#        self._write("\n")
-#        for deco in t.decorators:
-#            self._fill("@")
-#            self._dispatch(deco)
-#        self._fill("def "+t.name + "(")
-#        self._dispatch(t.args)
-#        self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _For(self, t):
-#        self._fill("for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    def _While(self, t):
-#        self._fill("while ")
-#        self._dispatch(t.test)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    # expr
-#    def _Str(self, tree):
-#        self._write(repr(tree.s))
-##
-#    def _Repr(self, t):
-#        self._write("`")
-#        self._dispatch(t.value)
-#        self._write("`")
-#
-#    def _Num(self, t):
-#        self._write(repr(t.n))
-#
-#    def _ListComp(self, t):
-#        self._write("[")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write("]")
-#
-#    def _GeneratorExp(self, t):
-#        self._write("(")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write(")")
-#
-#    def _comprehension(self, t):
-#        self._write(" for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        for if_clause in t.ifs:
-#            self._write(" if ")
-#            self._dispatch(if_clause)
-#
-#    def _IfExp(self, t):
-#        self._dispatch(t.body)
-#        self._write(" if ")
-#        self._dispatch(t.test)
-#        if t.orelse:
-#            self._write(" else ")
-#            self._dispatch(t.orelse)
-#
-#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-#    def _UnaryOp(self, t):
-#        self._write(self.unop[t.op.__class__.__name__])
-#        self._write("(")
-#        self._dispatch(t.operand)
-#        self._write(")")
-#
-#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-#                    "FloorDiv":"//", "Pow": "**"}
-#    def _BinOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.left)
-#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-#        self._dispatch(t.right)
-#        self._write(")")
-#
-#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
-#    def _BoolOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.values[0])
-#        for v in t.values[1:]:
-#            self._write(" %s " % self.boolops[t.op.__class__])
-#            self._dispatch(v)
-#        self._write(")")
-#
-#    def _Attribute(self,t):
-#        self._dispatch(t.value)
-#        self._write(".")
-#        self._write(t.attr)
-#
-##    def _Call(self, t):
-##        self._dispatch(t.func)
-##        self._write("(")
-##        comma = False
-##        for e in t.args:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        for e in t.keywords:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        if t.starargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("*")
-##            self._dispatch(t.starargs)
-##        if t.kwargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("**")
-##            self._dispatch(t.kwargs)
-##        self._write(")")
-#
-#    # slice
-#    def _Index(self, t):
-#        self._dispatch(t.value)
-#
-#    def _ExtSlice(self, t):
-#        for i, d in enumerate(t.dims):
-#            if i != 0:
-#                self._write(': ')
-#            self._dispatch(d)
-#
-#    # others
-#    def _arguments(self, t):
-#        first = True
-#        nonDef = len(t.args)-len(t.defaults)
-#        for a in t.args[0:nonDef]:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a)
-#        for a,d in zip(t.args[nonDef:], t.defaults):
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a),
-#            self._write("=")
-#            self._dispatch(d)
-#        if t.vararg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("*"+t.vararg)
-#        if t.kwarg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("**"+t.kwarg)
-#
-##    def _keyword(self, t):
-##        self._write(t.arg)
-##        self._write("=")
-##        self._dispatch(t.value)
-#
-#    def _Lambda(self, t):
-#        self._write("lambda ")
-#        self._dispatch(t.args)
-#        self._write(": ")
-#        self._dispatch(t.body)
-
-
-

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/docscrape.py
--- a/doc/extensions/numpydocmod/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self, docstring, config={}):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        if func.strip():
-                            push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        summary = self._doc.read_to_next_empty_line()
-        summary_str = " ".join([s.strip() for s in summary]).strip()
-        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-            self['Signature'] = summary_str
-            if not self._is_at_section():
-                self['Summary'] = self._doc.read_to_next_empty_line()
-        else:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
-                           'Other Parameters', 'Attributes', 'Methods'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                out += ['%s : %s' % (param, param_type)]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Other Parameters',
-                           'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_param_list(param_list)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func', doc=None, config={}):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-
-        if doc is None:
-            if func is None:
-                raise ValueError("No function or docstring given")
-            doc = inspect.getdoc(func) or ''
-        NumpyDocString.__init__(self, doc)
-
-        if not self['Signature'] and func is not None:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if not roles.has_key(self._role):
-                print("Warning: invalid role %s" % self._role)
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-                 config={}):
-        if not inspect.isclass(cls) and cls is not None:
-            raise ValueError("Expected a class or None, but got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-
-        if doc is None:
-            if cls is None:
-                raise ValueError("No class or documentation string given")
-            doc = pydoc.getdoc(cls)
-
-        NumpyDocString.__init__(self, doc)
-
-        if config.get('show_class_members', True):
-            if not self['Methods']:
-                self['Methods'] = [(name, '', '')
-                                   for name in sorted(self.methods)]
-            if not self['Attributes']:
-                self['Attributes'] = [(name, '', '')
-                                      for name in sorted(self.properties)]
-
-    @property
-    def methods(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and callable(func)]
-
-    @property
-    def properties(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and func is None]

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/docscrape_sphinx.py
--- a/doc/extensions/numpydocmod/docscrape_sphinx.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import re, inspect, textwrap, pydoc
-import sphinx
-from docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-class SphinxDocString(NumpyDocString):
-    def __init__(self, docstring, config={}):
-        self.use_plots = config.get('use_plots', False)
-        NumpyDocString.__init__(self, docstring, config=config)
-
-    # string conversion routines
-    def _str_header(self, name, symbol='`'):
-        return ['.. rubric:: ' + name, '']
-
-    def _str_field_list(self, name):
-        return [':' + name + ':']
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        return ['']
-        if self['Signature']:
-            return ['``%s``' % self['Signature']] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        return self['Summary'] + ['']
-
-    def _str_extended_summary(self):
-        return self['Extended Summary'] + ['']
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_field_list(name)
-            out += ['']
-            for param,param_type,desc in self[name]:
-                out += self._str_indent(['**%s** : %s' % (param.strip(),
-                                                          param_type)])
-                out += ['']
-                out += self._str_indent(desc,8)
-                out += ['']
-        return out
-
-    @property
-    def _obj(self):
-        if hasattr(self, '_cls'):
-            return self._cls
-        elif hasattr(self, '_f'):
-            return self._f
-        return None
-
-    def _str_member_list(self, name):
-        """
-        Generate a member listing, autosummary:: table where possible,
-        and a table where not.
-
-        """
-        out = []
-        if self[name]:
-            out += ['.. rubric:: %s' % name, '']
-            prefix = getattr(self, '_name', '')
-
-            if prefix:
-                prefix = '~%s.' % prefix
-
-            autosum = []
-            others = []
-            for param, param_type, desc in self[name]:
-                param = param.strip()
-                if not self._obj or hasattr(self._obj, param):
-                    autosum += ["   %s%s" % (prefix, param)]
-                else:
-                    others.append((param, param_type, desc))
-
-            if 0:#autosum:
-                out += ['.. autosummary::', '   :toctree:', '']
-                out += autosum
-                
-            if others:
-                maxlen_0 = max([len(x[0]) for x in others])
-                maxlen_1 = max([len(x[1]) for x in others])
-                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
-                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
-                n_indent = maxlen_0 + maxlen_1 + 4
-                out += [hdr]
-                for param, param_type, desc in others:
-                    out += [fmt % (param.strip(), param_type)]
-                    out += self._str_indent(desc, n_indent)
-                out += [hdr]
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += ['']
-            content = textwrap.dedent("\n".join(self[name])).split("\n")
-            out += content
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        out = []
-        if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
-            out = ['.. seealso::', '']
-            out += self._str_indent(see_also[2:])
-        return out
-
-    def _str_warnings(self):
-        out = []
-        if self['Warnings']:
-            out = ['.. warning::', '']
-            out += self._str_indent(self['Warnings'])
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        if len(idx) == 0:
-            return out
-
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            elif section == 'refguide':
-                out += ['   single: %s' % (', '.join(references))]
-            else:
-                out += ['   %s: %s' % (section, ','.join(references))]
-        return out
-
-    def _str_references(self):
-        out = []
-        if self['References']:
-            out += self._str_header('References')
-            if isinstance(self['References'], str):
-                self['References'] = [self['References']]
-            out.extend(self['References'])
-            out += ['']
-            # Latex collects all references to a separate bibliography,
-            # so we need to insert links to it
-            if sphinx.__version__ >= "0.6":
-                out += ['.. only:: latex','']
-            else:
-                out += ['.. latexonly::','']
-            items = []
-            for line in self['References']:
-                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
-                if m:
-                    items.append(m.group(1))
-            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
-        return out
-
-    def _str_examples(self):
-        examples_str = "\n".join(self['Examples'])
-
-        if (self.use_plots and 'import matplotlib' in examples_str
-                and 'plot::' not in examples_str):
-            out = []
-            out += self._str_header('Examples')
-            out += ['.. plot::', '']
-            out += self._str_indent(self['Examples'])
-            out += ['']
-            return out
-        else:
-            return self._str_section('Examples')
-
-    def __str__(self, indent=0, func_role="obj"):
-        out = []
-        out += self._str_signature()
-        out += self._str_index() + ['']
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Other Parameters',
-                           'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_warnings()
-        out += self._str_see_also(func_role)
-        out += self._str_section('Notes')
-        out += self._str_references()
-        out += self._str_examples()
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_member_list(param_list)
-        out = self._str_indent(out,indent)
-        return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
-    def __init__(self, obj, doc=None, config={}):
-        self.use_plots = config.get('use_plots', False)
-        FunctionDoc.__init__(self, obj, doc=doc, config=config)
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
-    def __init__(self, obj, doc=None, func_doc=None, config={}):
-        self.use_plots = config.get('use_plots', False)
-        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
-
-class SphinxObjDoc(SphinxDocString):
-    def __init__(self, obj, doc=None, config={}):
-        self._f = obj
-        SphinxDocString.__init__(self, doc, config=config)
-
-def get_doc_object(obj, what=None, doc=None, config={}):
-    if what is None:
-        if inspect.isclass(obj):
-            what = 'class'
-        elif inspect.ismodule(obj):
-            what = 'module'
-        elif callable(obj):
-            what = 'function'
-        else:
-            what = 'object'
-    if what == 'class':
-        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
-                              config=config)
-    elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, doc=doc, config=config)
-    else:
-        if doc is None:
-            doc = pydoc.getdoc(obj)
-        return SphinxObjDoc(obj, doc, config=config)

diff -r 8b7e6e1c645b5fc076e0aeebd074d9a72b8cc88c -r 7e69492b6a4d54fb151eb1d2550e978570e9849a doc/extensions/numpydocmod/numpydoc.py
--- a/doc/extensions/numpydocmod/numpydoc.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-========
-numpydoc
-========
-
-Sphinx extension that handles docstrings in the Numpy standard format. [1]
-
-It will:
-
-- Convert Parameters etc. sections to field lists.
-- Convert See Also section to a See also entry.
-- Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
-
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
-
-"""
-
-import os, re, pydoc
-from docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
-import inspect
-
-def mangle_docstrings(app, what, name, obj, options, lines,
-                      reference_offset=[0]):
-
-    cfg = dict(use_plots=app.config.numpydoc_use_plots,
-               show_class_members=app.config.numpydoc_show_class_members)
-
-    if what == 'module':
-        # Strip top title
-        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
-                              re.I|re.S)
-        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
-    else:
-        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
-        lines[:] = unicode(doc).split(u"\n")
-
-    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
-           obj.__name__:
-        if hasattr(obj, '__module__'):
-            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
-        else:
-            v = dict(full_name=obj.__name__)
-        lines += [u'', u'.. htmlonly::', '']
-        lines += [u'    %s' % x for x in
-                  (app.config.numpydoc_edit_link % v).split("\n")]
-
-    # replace reference numbers so that there are no duplicates
-    references = []
-    for line in lines:
-        line = line.strip()
-        m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
-        if m:
-            references.append(m.group(1))
-
-    # start renaming from the longest string, to avoid overwriting parts
-    references.sort(key=lambda x: -len(x))
-    if references:
-        for i, line in enumerate(lines):
-            for r in references:
-                if re.match(ur'^\d+$', r):
-                    new_r = u"R%d" % (reference_offset[0] + int(r))
-                else:
-                    new_r = u"%s%d" % (r, reference_offset[0])
-                lines[i] = lines[i].replace(u'[%s]_' % r,
-                                            u'[%s]_' % new_r)
-                lines[i] = lines[i].replace(u'.. [%s]' % r,
-                                            u'.. [%s]' % new_r)
-
-    reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj, options, sig, retann):
-    # Do not try to inspect classes that don't define `__init__`
-    if (inspect.isclass(obj) and
-        (not hasattr(obj, '__init__') or
-        'initializes x; see ' in pydoc.getdoc(obj.__init__))):
-        return '', ''
-
-    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
-    if not hasattr(obj, '__doc__'): return
-
-    doc = SphinxDocString(pydoc.getdoc(obj))
-    if doc['Signature']:
-        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
-        return sig, u''
-
-def setup(app, get_doc_object_=get_doc_object):
-    global get_doc_object
-    get_doc_object = get_doc_object_
-
-    app.connect('autodoc-process-docstring', mangle_docstrings)
-    app.connect('autodoc-process-signature', mangle_signature)
-    app.add_config_value('numpydoc_edit_link', None, False)
-    app.add_config_value('numpydoc_use_plots', None, False)
-    app.add_config_value('numpydoc_show_class_members', True, True)
-
-    # Extra mangling domains
-    app.add_domain(NumpyPythonDomain)
-    app.add_domain(NumpyCDomain)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-
-#------------------------------------------------------------------------------
-# Docstring-mangling domains
-#------------------------------------------------------------------------------
-
-from docutils.statemachine import ViewList
-from sphinx.domains.c import CDomain
-from sphinx.domains.python import PythonDomain
-
-class ManglingDomainBase(object):
-    directive_mangling_map = {}
-
-    def __init__(self, *a, **kw):
-        super(ManglingDomainBase, self).__init__(*a, **kw)
-        self.wrap_mangling_directives()
-
-    def wrap_mangling_directives(self):
-        for name, objtype in self.directive_mangling_map.items():
-            self.directives[name] = wrap_mangling_directive(
-                self.directives[name], objtype)
-
-class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
-    name = 'np'
-    directive_mangling_map = {
-        'function': 'function',
-        'class': 'class',
-        'exception': 'class',
-        'method': 'function',
-        'classmethod': 'function',
-        'staticmethod': 'function',
-        'attribute': 'attribute',
-    }
-
-class NumpyCDomain(ManglingDomainBase, CDomain):
-    name = 'np-c'
-    directive_mangling_map = {
-        'function': 'function',
-        'member': 'attribute',
-        'macro': 'function',
-        'type': 'class',
-        'var': 'object',
-    }
-
-def wrap_mangling_directive(base_directive, objtype):
-    class directive(base_directive):
-        def run(self):
-            env = self.state.document.settings.env
-
-            name = None
-            if self.arguments:
-                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
-                name = m.group(2).strip()
-
-            if not name:
-                name = self.arguments[0]
-
-            lines = list(self.content)
-            mangle_docstrings(env.app, objtype, name, None, None, lines)
-            self.content = ViewList(lines, self.content.parent)
-
-            return base_directive.run(self)
-
-    return directive
-

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/345bc90272fd/
Changeset:   345bc90272fd
Branch:      yt
User:        jzuhone
Date:        2016-03-02 20:14:04+00:00
Summary:     Merge
Affected #:  44 files

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
 yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 clean.sh
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,1 @@
-find . -name "*.so" -exec rm -v {} \;
-find . -name "*.pyc" -exec rm -v {} \;
-find . -name "__config__.py" -exec rm -v {} \;
-rm -rvf build dist
+hg --config extensions.purge= purge --all yt

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -453,12 +453,19 @@
    @requires_ds(my_ds)
    def test_my_ds():
        ds = data_dir_load(my_ds)
+
        def create_image(filename_prefix):
            plt.plot([1, 2], [1, 2])
            plt.savefig(filename_prefix)
        test = GenericImageTest(ds, create_image, 12)
+
+       # this ensures the test has a unique key in the
+       # answer test storage file
+       test.prefix = "my_unique_name"
+
        # this ensures a nice test name in nose's output
        test_my_ds.__description__ = test.description
+
        yield test_my_ds
 
 Another good example of an image comparison test is the

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -434,8 +434,8 @@
 positions with time. For analysis and visualization, it is often useful to turn 
 these displacements on or off, and to be able to scale them arbitrarily to 
 emphasize certain features of the solution. To allow this, if ``yt`` detects 
-displacement fields in an Exodus II dataset (using the convention that they will
- be named ``disp_x``, ``disp_y``, etc...), it will add optionally add these to 
+displacement fields in an Exodus II dataset (using the convention that they will 
+be named ``disp_x``, ``disp_y``, etc...), it will optionally add these to 
 the mesh vertex positions for the purposes of visualization. Displacement fields 
 can be controlled when a dataset is loaded by passing in an optional dictionary 
 to the ``yt.load`` command. This feature is turned off by default, meaning that 

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -14,7 +14,7 @@
 
 .. code-block:: bash
 
-    conda install -c http://use.yt/with_conda/ yt
+    conda install -c http://use.yt/with_conda/ yt=3.3_dev
 
 If you want to install from source, you can use the ``get_yt.sh`` script.
 Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the 

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,7 @@
 from setuptools import setup, find_packages
 from setuptools.extension import Extension
 from setuptools.command.build_ext import build_ext as _build_ext
+from setuptools.command.sdist import sdist as _sdist
 from setuptools.command.build_py import build_py as _build_py
 from setupext import \
     check_for_openmp, check_for_pyembree, read_embree_location, \
@@ -122,6 +123,11 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.bounding_volume_hierarchy",
+              ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
+              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -322,6 +328,16 @@
         import numpy
         self.include_dirs.append(numpy.get_include())
 
+class sdist(_sdist):
+    # subclass setuptools source distribution builder to ensure cython
+    # generated C files are included in source distribution.
+    # See http://stackoverflow.com/a/18418524/1382869
+    def run(self):
+        # Make sure the compiled Cython files in the distribution are up-to-date
+        from Cython.Build import cythonize
+        cythonize(cython_extensions)
+        _sdist.run(self)
+
 setup(
     name="yt",
     version=VERSION,
@@ -352,15 +368,17 @@
     packages=find_packages(),
     setup_requires=[
         'numpy',
-        'cython>=0.22'
+        'cython>=0.22',
     ],
     install_requires=[
         # 'matplotlib',  # messes up nosetests will be fixed in future PRs
+        'setuptools>=18.0',
         'sympy',
         'numpy',
         'IPython',
+        'cython',
     ],
-    cmdclass={'build_ext': build_ext, 'build_py': build_py},
+    cmdclass={'sdist': sdist, 'build_ext': build_ext, 'build_py': build_py},
     author="The yt project",
     author_email="yt-dev at lists.spacepope.org",
     url="http://yt-project.org/",

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -178,3 +178,6 @@
 
 from yt.units.unit_systems import UnitSystem
 from yt.units.unit_object import unit_system_registry
+
+from yt.analysis_modules.list_modules import \
+    amods

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,7 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.data_point_utilities import FindBindingEnergy
+from yt.utilities.lib.misc_utilities import \
+    gravitational_binding_energy
 from yt.utilities.operator_registry import \
     OperatorRegistry
 from yt.utilities.physical_constants import \
@@ -64,11 +65,12 @@
              (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
 
     potential = clump.data.ds.quan(G *
-        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
-                          clump["index", "x"].in_cgs(),
-                          clump["index", "y"].in_cgs(),
-                          clump["index", "z"].in_cgs(),
-                          truncate, (kinetic / G).in_cgs()),
+        gravitational_binding_energy(
+            clump["gas", "cell_mass"].in_cgs(),
+            clump["index", "x"].in_cgs(),
+            clump["index", "y"].in_cgs(),
+            clump["index", "z"].in_cgs(),
+            truncate, (kinetic / G).in_cgs()),
         kinetic.in_cgs().units)
     
     if truncate and potential >= kinetic:
@@ -76,7 +78,7 @@
 
     if use_particles:
         potential += clump.data.ds.quan(G *
-            FindBindingEnergy(
+            gravitational_binding_energy(
                 clump["all", "particle_mass"].in_cgs(),
                 clump["all", "particle_position_x"].in_cgs(),
                 clump["all", "particle_position_y"].in_cgs(),

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -39,7 +39,7 @@
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoreddatasets = '500',
-    skip_dataset_cache = 'False',
+    skip_dataset_cache = 'True',
     loadfieldplugins = 'True',
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -702,11 +702,11 @@
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
         # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions.prod(), kernel_name)
+        op = cls(self.ActiveDimensions, kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="C")
+        return vals.copy(order="C")
 
     def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None,
                      **kwargs):

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1056,6 +1056,7 @@
                                    "of lower dimensionality (%u vs %u)" %
                                     (data_source._dimensionality, self._dimensionality))
             self.field_parameters.update(data_source.field_parameters)
+        self.quantities = DerivedQuantityCollection(self)
 
     @property
     def selector(self):
@@ -1465,7 +1466,6 @@
         self._set_center(center)
         self.coords = None
         self._grids = None
-        self.quantities = DerivedQuantityCollection(self)
 
     def cut_region(self, field_cuts, field_parameters=None):
         """

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -327,12 +327,13 @@
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
         # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions.prod(), kernel_name)
+        # Everything inside this is fortran ordered, so we reverse it here.
+        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
         if vals is None: return
-        return vals.reshape(self.ActiveDimensions, order="C")
+        return vals.transpose() # Fortran-ordered, so transpose.
 
     def select_blocks(self, selector):
         mask = self._get_selector_mask(selector)

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -69,7 +69,11 @@
     _con_args = ('p',)
     def __init__(self, p, ds=None, field_parameters=None, data_source=None):
         super(YTPoint, self).__init__(ds, field_parameters, data_source)
-        self.p = p
+        if isinstance(p, YTArray):
+            # we pass p through ds.arr to ensure code units are attached
+            self.p = self.ds.arr(p)
+        else:
+            self.p = self.ds.arr(p, 'code_length')
 
 class YTOrthoRay(YTSelectionContainer1D):
     """

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -442,7 +442,9 @@
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))
-            self.add_particle_union(pu)
+            nfields = self.add_particle_union(pu)
+            if nfields == 0:
+                mylog.debug("zero common fields: skipping particle union 'all'")
         self.field_info.setup_extra_union_fields()
         mylog.debug("Loading field plugins.")
         self.field_info.load_all_plugins()
@@ -494,9 +496,17 @@
     def add_particle_union(self, union):
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
-        fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw
-                                   and len(f[s]) > 0])
+
+        # find fields common to all particle types in the union
+        fields = set_intersection(
+            [f[s] for s in union if s in self.particle_types_raw]
+        )
+
+        if len(fields) == 0:
+            # don't create this union if no fields are common to all
+            # particle types
+            return len(fields)
+
         for field in fields:
             units = set([])
             for s in union:
@@ -515,6 +525,7 @@
         # ...if we can't find them, we set them up as defaults.
         new_fields = self._setup_particle_types([union.name])
         self.field_info.find_dependencies(new_fields)
+        return len(new_fields)
 
     def add_particle_filter(self, filter):
         # This requires an index
@@ -995,7 +1006,8 @@
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)
 
-    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic'):
+    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic',
+                                     weight_field='particle_mass'):
         """Add a new deposited particle field
 
         Creates a new deposited field based on the particle *deposit_field*.
@@ -1010,13 +1022,15 @@
         method : string
            This is the "method name" which will be looked up in the
            `particle_deposit` namespace as `methodname_deposit`.  Current
-           methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
-           `weighted_mean`, `mesh_id`, and `nearest`.
+           methods include `simple_smooth`, `sum`, `std`, `cic`, `weighted_mean`,
+           `mesh_id`, and `nearest`.
         kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. It is only used for
            the `simple_smooth` method and is otherwise ignored. Current
            supported kernel names include `cubic`, `quartic`, `quintic`,
            `wendland2`, `wendland4`, and `wendland6`.
+        weight_field : string, default 'particle_mass'
+           Weighting field name for deposition method `weighted_mean`.
 
         Returns
         -------
@@ -1028,38 +1042,45 @@
             ptype, deposit_field = deposit_field[0], deposit_field[1]
         else:
             raise RuntimeError
+
         units = self.field_info[ptype, deposit_field].units
+        take_log = self.field_info[ptype, deposit_field].take_log
+        name_map = {"sum": "sum", "std":"std", "cic": "cic", "weighted_mean": "avg",
+                    "nearest": "nn", "simple_smooth": "ss", "count": "count"}
+        field_name = "%s_" + name_map[method] + "_%s"
+        field_name = field_name % (ptype, deposit_field.replace('particle_', ''))
+
+        if method == "count":
+            field_name = "%s_count" % ptype
+            if ("deposit", field_name) in self.field_info:
+                mylog.warning("The deposited field %s already exists" % field_name)
+                return ("deposit", field_name)
+            else:
+                units = "dimensionless"
+                take_log = False
 
         def _deposit_field(field, data):
             """
-            Create a grid field for particle wuantities weighted by particle
-            mass, using cloud-in-cell deposition.
+            Create a grid field for particle quantities using given method.
             """
             pos = data[ptype, "particle_position"]
-            # get back into density
-            if method != 'count':
-                pden = data[ptype, "particle_mass"]
-                top = data.deposit(pos, [data[(ptype, deposit_field)]*pden],
-                                   method=method, kernel_name=kernel_name)
-                bottom = data.deposit(pos, [pden], method=method,
-                                      kernel_name=kernel_name)
-                top[bottom == 0] = 0.0
-                bnz = bottom.nonzero()
-                top[bnz] /= bottom[bnz]
-                d = data.ds.arr(top, input_units=units)
+            if method == 'weighted_mean':
+                d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field],
+                                                   data[ptype, weight_field]],
+                                             method=method, kernel_name=kernel_name),
+                                             input_units=units)
+                d[np.isnan(d)] = 0.0
             else:
                 d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field]],
-                                             method=method,
-                                             kernel_name=kernel_name))
+                                             method=method, kernel_name=kernel_name),
+                                             input_units=units)
             return d
-        name_map = {"cic": "cic", "sum": "nn", "count": "count"}
-        field_name = "%s_" + name_map[method] + "_%s"
-        field_name = field_name % (ptype, deposit_field.replace('particle_', ''))
+
         self.add_field(
             ("deposit", field_name),
             function=_deposit_field,
             units=units,
-            take_log=False,
+            take_log=take_log,
             validators=[ValidateSpatial()])
         return ("deposit", field_name)
 

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -13,109 +13,109 @@
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs, fields = ("density",
                 "velocity_x", "velocity_y", "velocity_z"))
-        sp = ds.sphere("c", (0.25, 'unitary'))
-        mi, ma = sp.quantities["Extrema"]("density")
-        yield assert_equal, mi, np.nanmin(sp["density"])
-        yield assert_equal, ma, np.nanmax(sp["density"])
-        dd = ds.all_data()
-        mi, ma = dd.quantities["Extrema"]("density")
-        yield assert_equal, mi, np.nanmin(dd["density"])
-        yield assert_equal, ma, np.nanmax(dd["density"])
-        sp = ds.sphere("max", (0.25, 'unitary'))
-        yield assert_equal, np.any(np.isnan(sp["radial_velocity"])), False
-        mi, ma = dd.quantities["Extrema"]("radial_velocity")
-        yield assert_equal, mi, np.nanmin(dd["radial_velocity"])
-        yield assert_equal, ma, np.nanmax(dd["radial_velocity"])
+        for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5,:,:]]:
+            mi, ma = sp.quantities["Extrema"]("density")
+            yield assert_equal, mi, np.nanmin(sp["density"])
+            yield assert_equal, ma, np.nanmax(sp["density"])
+            dd = ds.all_data()
+            mi, ma = dd.quantities["Extrema"]("density")
+            yield assert_equal, mi, np.nanmin(dd["density"])
+            yield assert_equal, ma, np.nanmax(dd["density"])
+            sp = ds.sphere("max", (0.25, 'unitary'))
+            yield assert_equal, np.any(np.isnan(sp["radial_velocity"])), False
+            mi, ma = dd.quantities["Extrema"]("radial_velocity")
+            yield assert_equal, mi, np.nanmin(dd["radial_velocity"])
+            yield assert_equal, ma, np.nanmax(dd["radial_velocity"])
 
 def test_average():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs, fields = ("density",))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
-        my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones")
-        yield assert_rel_equal, my_mean, ad["density"].mean(), 12
+            my_mean = ad.quantities["WeightedAverageQuantity"]("density", "ones")
+            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
 
-        my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass")
-        a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-        yield assert_rel_equal, my_mean, a_mean, 12
+            my_mean = ad.quantities["WeightedAverageQuantity"]("density", "cell_mass")
+            a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
+            yield assert_rel_equal, my_mean, a_mean, 12
 
 def test_variance():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
         
-        my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones")
-        yield assert_rel_equal, my_mean, ad["density"].mean(), 12
-        yield assert_rel_equal, my_std, ad["density"].std(), 12
+            my_std, my_mean = ad.quantities["WeightedVariance"]("density", "ones")
+            yield assert_rel_equal, my_mean, ad["density"].mean(), 12
+            yield assert_rel_equal, my_std, ad["density"].std(), 12
 
-        my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass")        
-        a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
-        yield assert_rel_equal, my_mean, a_mean, 12
-        a_std = np.sqrt((ad["cell_mass"] * (ad["density"] - a_mean)**2).sum() / 
-                        ad["cell_mass"].sum())
-        yield assert_rel_equal, my_std, a_std, 12
+            my_std, my_mean = ad.quantities["WeightedVariance"]("density", "cell_mass")        
+            a_mean = (ad["density"] * ad["cell_mass"]).sum() / ad["cell_mass"].sum()
+            yield assert_rel_equal, my_mean, a_mean, 12
+            a_std = np.sqrt((ad["cell_mass"] * (ad["density"] - a_mean)**2).sum() / 
+                            ad["cell_mass"].sum())
+            yield assert_rel_equal, my_std, a_std, 12
 
 def test_max_location():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
 
-        mv, x, y, z = ad.quantities.max_location(("gas", "density"))
+            mv, x, y, z = ad.quantities.max_location(("gas", "density"))
 
-        yield assert_equal, mv, ad["density"].max()
+            yield assert_equal, mv, ad["density"].max()
 
-        mi = np.argmax(ad["density"])
+            mi = np.argmax(ad["density"])
 
-        yield assert_equal, ad["x"][mi], x
-        yield assert_equal, ad["y"][mi], y
-        yield assert_equal, ad["z"][mi], z
+            yield assert_equal, ad["x"][mi], x
+            yield assert_equal, ad["y"][mi], y
+            yield assert_equal, ad["z"][mi], z
 
 def test_min_location():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs, fields = ("density", ))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
 
-        mv, x, y, z = ad.quantities.min_location(("gas", "density"))
+            mv, x, y, z = ad.quantities.min_location(("gas", "density"))
 
-        yield assert_equal, mv, ad["density"].min()
+            yield assert_equal, mv, ad["density"].min()
 
-        mi = np.argmin(ad["density"])
+            mi = np.argmin(ad["density"])
 
-        yield assert_equal, ad["x"][mi], x
-        yield assert_equal, ad["y"][mi], y
-        yield assert_equal, ad["z"][mi], z
+            yield assert_equal, ad["x"][mi], x
+            yield assert_equal, ad["y"][mi], y
+            yield assert_equal, ad["z"][mi], z
 
 def test_sample_at_min_field_values():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs,
             fields = ("density", "temperature", "velocity_x"))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
 
-        mv, temp, vm = ad.quantities.sample_at_min_field_values(
-            "density", ["temperature", "velocity_x"])
+            mv, temp, vm = ad.quantities.sample_at_min_field_values(
+                "density", ["temperature", "velocity_x"])
 
-        yield assert_equal, mv, ad["density"].min()
+            yield assert_equal, mv, ad["density"].min()
 
-        mi = np.argmin(ad["density"])
+            mi = np.argmin(ad["density"])
 
-        yield assert_equal, ad["temperature"][mi], temp
-        yield assert_equal, ad["velocity_x"][mi], vm
+            yield assert_equal, ad["temperature"][mi], temp
+            yield assert_equal, ad["velocity_x"][mi], vm
 
 def test_sample_at_max_field_values():
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs,
             fields = ("density", "temperature", "velocity_x"))
-        ad = ds.all_data()
+        for ad in [ds.all_data(), ds.r[0.5, :, :]]:
 
-        mv, temp, vm = ad.quantities.sample_at_max_field_values(
-            "density", ["temperature", "velocity_x"])
+            mv, temp, vm = ad.quantities.sample_at_max_field_values(
+                "density", ["temperature", "velocity_x"])
 
-        yield assert_equal, mv, ad["density"].max()
+            yield assert_equal, mv, ad["density"].max()
 
-        mi = np.argmax(ad["density"])
+            mi = np.argmax(ad["density"])
 
-        yield assert_equal, ad["temperature"][mi], temp
-        yield assert_equal, ad["velocity_x"][mi], vm
+            yield assert_equal, ad["temperature"][mi], temp
+            yield assert_equal, ad["velocity_x"][mi], vm
 
 if __name__ == "__main__":
     for i in test_extrema():

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/data_objects/tests/test_points.py
--- a/yt/data_objects/tests/test_points.py
+++ b/yt/data_objects/tests/test_points.py
@@ -10,6 +10,17 @@
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def test_point_creation():
+    ds = fake_random_ds(16)
+    p1 = ds.point(ds.domain_center)
+    p2 = ds.point([0.5, 0.5, 0.5])
+    p3 = ds.point([0.5, 0.5, 0.5]*yt.units.cm)
+
+    # ensure all three points are really at the same position
+    for fname in 'xyz':
+        assert_equal(p1[fname], p2[fname])
+        assert_equal(p1[fname], p3[fname])
+
 def test_domain_point():
     nparticles = 3
     ds = fake_random_ds(16, particles=nparticles)

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/extensions/__init__.py
--- /dev/null
+++ b/yt/extensions/__init__.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+"""
+    yt.extensions
+    ~~~~~~~~~~~~~
+
+    Redirect imports for extensions.  This module basically makes it possible
+    for us to transition from ytext.foo to ytext_foo without having to
+    force all extensions to upgrade at the same time.
+
+    When a user does ``from yt.extensions.foo import bar`` it will attempt to
+    import ``from yt_foo import bar`` first and when that fails it will
+    try to import ``from ytext.foo import bar``.
+
+    We're switching from namespace packages because it was just too painful for
+    everybody involved.
+
+    :copyright: (c) 2015 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+
+# This source code is originally from flask, in the flask/ext/__init__.py file.
+
+
+def setup():
+    from ..exthook import ExtensionImporter
+    importer = ExtensionImporter(['yt_%s', 'ytext.%s'], __name__)
+    importer.install()
+
+
+setup()
+del setup

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/exthook.py
--- /dev/null
+++ b/yt/exthook.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+"""
+    yt.exthook
+    ~~~~~~~~~~
+
+    Redirect imports for extensions.  This module basically makes it possible
+    for us to transition from ytext.foo to yt_foo without having to
+    force all extensions to upgrade at the same time.
+
+    When a user does ``from yt.extensions.foo import bar`` it will attempt to
+    import ``from yt_foo import bar`` first and when that fails it will
+    try to import ``from ytext.foo import bar``.
+
+    We're switching from namespace packages because it was just too painful for
+    everybody involved.
+
+    This is used by `yt.extensions`.
+
+    :copyright: (c) 2015 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+# This source code was originally in flask/exthook.py
+import sys
+import os
+from .extern.six import reraise
+
+
+class ExtensionImporter(object):
+    """This importer redirects imports from this submodule to other locations.
+    This makes it possible to transition from the old flaskext.name to the
+    newer flask_name without people having a hard time.
+    """
+
+    def __init__(self, module_choices, wrapper_module):
+        self.module_choices = module_choices
+        self.wrapper_module = wrapper_module
+        self.prefix = wrapper_module + '.'
+        self.prefix_cutoff = wrapper_module.count('.') + 1
+
+    def __eq__(self, other):
+        return self.__class__.__module__ == other.__class__.__module__ and \
+               self.__class__.__name__ == other.__class__.__name__ and \
+               self.wrapper_module == other.wrapper_module and \
+               self.module_choices == other.module_choices
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def install(self):
+        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
+
+    def find_module(self, fullname, path=None):
+        if fullname.startswith(self.prefix):
+            return self
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            return sys.modules[fullname]
+        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
+        for path in self.module_choices:
+            realname = path % modname
+            try:
+                __import__(realname)
+            except ImportError:
+                exc_type, exc_value, tb = sys.exc_info()
+                # since we only establish the entry in sys.modules at the
+                # very this seems to be redundant, but if recursive imports
+                # happen we will call into the move import a second time.
+                # On the second invocation we still don't have an entry for
+                # fullname in sys.modules, but we will end up with the same
+                # fake module name and that import will succeed since this
+                # one already has a temporary entry in the modules dict.
+                # Since this one "succeeded" temporarily that second
+                # invocation now will have created a fullname entry in
+                # sys.modules which we have to kill.
+                sys.modules.pop(fullname, None)
+
+                # If it's an important traceback we reraise it, otherwise
+                # we swallow it and try the next choice.  The skipped frame
+                # is the one from __import__ above which we don't care about
+                if self.is_important_traceback(realname, tb):
+                    reraise(exc_type, exc_value, tb.tb_next)
+                continue
+            module = sys.modules[fullname] = sys.modules[realname]
+            if '.' not in modname:
+                setattr(sys.modules[self.wrapper_module], modname, module)
+            return module
+        raise ImportError('No module named %s' % fullname)
+
+    def is_important_traceback(self, important_module, tb):
+        """Walks a traceback's frames and checks if any of the frames
+        originated in the given important module.  If that is the case then we
+        were able to import the module itself but apparently something went
+        wrong when the module was imported.  (Eg: import of an import failed).
+        """
+        while tb is not None:
+            if self.is_important_frame(important_module, tb):
+                return True
+            tb = tb.tb_next
+        return False
+
+    def is_important_frame(self, important_module, tb):
+        """Checks a single frame if it's important."""
+        g = tb.tb_frame.f_globals
+        if '__name__' not in g:
+            return False
+
+        module_name = g['__name__']
+
+        # Python 2.7 Behavior.  Modules are cleaned up late so the
+        # name shows up properly here.  Success!
+        if module_name == important_module:
+            return True
+
+        # Some python versions will clean up modules so early that the
+        # module name at that point is no longer set.  Try guessing from
+        # the filename then.
+        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
+        test_string = os.path.sep + important_module.replace('.', os.path.sep)
+        return test_string + '.py' in filename or \
+               test_string + os.path.sep + '__init__.py' in filename

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -87,7 +87,7 @@
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
                  not_in_all=False, display_name=None, output_units=None,
-                 dimensions=None):
+                 dimensions=None, ds=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -95,6 +95,7 @@
         self.display_field = display_field
         self.particle_type = particle_type
         self.vector_field = vector_field
+        self.ds = ds
 
         self._function = function
 
@@ -142,11 +143,11 @@
         return dd
 
     def get_units(self):
-        u = Unit(self.units)
+        u = Unit(self.units, registry=self.ds.unit_registry)
         return u.latex_representation()
 
     def get_projected_units(self):
-        u = Unit(self.units)*Unit('cm')
+        u = Unit(self.units, registry=self.ds.unit_registry)*Unit('cm')
         return u.latex_representation()
 
     def check_available(self, data):
@@ -220,7 +221,7 @@
         if projected:
             raise NotImplementedError
         else:
-            units = Unit(self.units)
+            units = Unit(self.units, registry=self.ds.unit_registry)
         # Add unit label
         if not units.is_dimensionless:
             data_label += r"\ \ (%s)" % (units.latex_representation())

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -240,6 +240,7 @@
         # the derived field and exit. If used as a decorator, function will
         # be None. In that case, we return a function that will be applied
         # to the function that the decorator is applied to.
+        kwargs.setdefault('ds', self.ds)
         if function is None:
             def create_function(f):
                 self[name] = DerivedField(name, f, **kwargs)
@@ -274,6 +275,7 @@
         return loaded, unavailable
 
     def add_output_field(self, name, **kwargs):
+        kwargs.setdefault('ds', self.ds)
         self[name] = DerivedField(name, NullFunc, **kwargs)
 
     def alias(self, alias_name, original_name, units = None):

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -769,7 +769,7 @@
 
 def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
         smoothing_length_name, density_name, smoothed_field, registry,
-        nneighbors = None, kernel_name = 'cubic'):
+        nneighbors = 64, kernel_name = 'cubic'):
     unit_system = registry.ds.unit_system
     if kernel_name == 'cubic':
         field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
@@ -791,17 +791,15 @@
         else:
             hsml = data[ptype, smoothing_length_name]
             hsml.convert_to_units("code_length")
-        kwargs = {}
-        if nneighbors:
-            kwargs['nneighbors'] = nneighbors
         # This is for applying cutoffs, similar to in the SPLASH paper.
         smooth_cutoff = data["index","cell_volume"]**(1./3)
         smooth_cutoff.convert_to_units("code_length")
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
+                         index_fields=[smooth_cutoff],
                          method="volume_weighted",
                          create_octree=True,
-                         index_fields=[smooth_cutoff],
+                         nneighbors=nneighbors,
                          kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -34,7 +34,7 @@
         fields.append(("gas", fname))
         units.append(code_units)
 
-    base_ds = fake_random_ds(4, fields=fields, units=units, particles=10)
+    base_ds = fake_random_ds(4, fields=fields, units=units, particles=20)
 
     base_ds.index
     base_ds.cosmological_simulation = 1
@@ -196,12 +196,28 @@
             yield TestFieldAccess(field, nproc)
 
 def test_add_deposited_particle_field():
+    # NOT tested: "std", "mesh_id", "nearest" and "simple_smooth"
     global base_ds
-    fn = base_ds.add_deposited_particle_field(('io', 'particle_ones'), 'count')
-    assert_equal(fn, ('deposit', 'io_count_ones'))
     ad = base_ds.all_data()
+
+    # Test "count", "sum" and "cic" method
+    for method in ["count", "sum", "cic"]:
+        fn = base_ds.add_deposited_particle_field(('io', 'particle_mass'), method)
+        expected_fn = 'io_%s' if method == "count" else 'io_%s_mass'
+        assert_equal(fn, ('deposit', expected_fn % method))
+        ret = ad[fn]
+        if method == "count":
+            assert_equal(ret.sum(), ad['particle_ones'].sum())
+        else:
+            assert_almost_equal(ret.sum(), ad['particle_mass'].sum())
+
+    # Test "weighted_mean" method
+    fn = base_ds.add_deposited_particle_field(('io', 'particle_ones'), 'weighted_mean',
+                                              weight_field='particle_ones')
+    assert_equal(fn, ('deposit', 'io_avg_ones'))
     ret = ad[fn]
-    assert_equal(ret.sum(), ad['particle_ones'].sum())
+    # The sum should equal the number of cells that have particles
+    assert_equal(ret.sum(), np.count_nonzero(ad[("deposit", "io_count")]))
 
 @requires_file('GadgetDiskGalaxy/snapshot_200.hdf5')
 def test_add_smoothed_particle_field():

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -15,6 +15,7 @@
 from libc.stdlib cimport malloc, free
 from libc.string cimport memcpy
 import data_structures
+from yt.utilities.lib.misc_utilities import OnceIndirect
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
@@ -1554,6 +1555,9 @@
         if fields is None:
             fields = []
         nf = len(fields)
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         cdef np.ndarray[np.int64_t, ndim=1] domain_ind
@@ -1568,17 +1572,9 @@
                 continue
             domain_ind[sfc - self.sfc_start] = j
             j += 1
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
         cdef np.float64_t left_edge[3]
         cdef int coords[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 1
         cdef np.int64_t offset, moff

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -178,8 +178,15 @@
                 raise RuntimeError("yt needs cylindrical to be 2D")
             self.level_dds[:,2] = 2*np.pi
             default_zbounds = (0.0, 2*np.pi)
+        elif self.ds.geometry == "spherical":
+            # BoxLib only supports 1D spherical, so ensure
+            # the other dimensions have the right extent.
+            self.level_dds[:,1] = np.pi
+            self.level_dds[:,2] = 2*np.pi
+            default_ybounds = (0.0, np.pi)
+            default_zbounds = (0.0, 2*np.pi)
         else:
-            raise RuntimeError("yt only supports cartesian and cylindrical coordinates.")
+            raise RuntimeError("Unknown BoxLib coordinate system.")
         if int(next(header_file)) != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
@@ -588,8 +595,10 @@
             self.geometry = "cartesian"
         elif coordinate_type == 1:
             self.geometry = "cylindrical"
+        elif coordinate_type == 2:
+            self.geometry = "spherical"
         else:
-            raise RuntimeError("yt does not yet support spherical geometry")
+            raise RuntimeError("Unknown BoxLib coord_type")
 
         # overrides for 1/2-dimensional data
         if self.dimensionality == 1:

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -278,12 +278,13 @@
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
         else:
-            nap = {}
             if "AppendActiveParticleType" in self.parameters:
+                nap = {}
                 active_particles = True
                 for type in self.parameters.get("AppendActiveParticleType", []):
                     nap[type] = []
             else:
+                nap = None
                 active_particles = False
         for grid_id in range(self.num_grids):
             pbar.update(grid_id)

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-Enzo frontend tests using moving7
+Enzo frontend tests
 
 
 
@@ -33,6 +33,13 @@
 _fields = ("temperature", "density", "velocity_magnitude",
            "velocity_divergence")
 
+two_sphere_test = 'ActiveParticleTwoSphere/DD0011/DD0011'
+active_particle_cosmology = 'ActiveParticleCosmology/DD0046/DD0046'
+ecp = "enzo_cosmology_plus/DD0046/DD0046"
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+
+
 def check_color_conservation(ds):
     species_names = ds.field_info.species_names
     dd = ds.all_data()
@@ -68,7 +75,6 @@
         test_moving7.__name__ = test.description
         yield test
 
-g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_ds(g30, big_data=True)
 def test_galaxy0030():
     ds = data_dir_load(g30)
@@ -78,7 +84,6 @@
         test_galaxy0030.__name__ = test.description
         yield test
 
-enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
 def test_simulated_halo_mass_function():
     ds = data_dir_load(enzotiny)
@@ -91,7 +96,6 @@
     for fit in range(1, 6):
         yield AnalyticHaloMassFunctionTest(ds, fit)
 
-ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_ds(ecp, big_data=True)
 def test_ecp():
     ds = data_dir_load(ecp)
@@ -116,3 +120,33 @@
 @requires_file(enzotiny)
 def test_EnzoDataset():
     assert isinstance(data_dir_load(enzotiny), EnzoDataset)
+
+@requires_file(two_sphere_test)
+@requires_file(active_particle_cosmology)
+def test_active_particle_datasets():
+    two_sph = data_dir_load(two_sphere_test)
+    assert 'AccretingParticle' in two_sph.particle_types_raw
+    assert_equal(len(two_sph.particle_unions), 0)
+    pfields = ['GridID', 'creation_time', 'dynamical_time',
+               'identifier', 'level', 'metallicity', 'particle_mass']
+    pfields += ['particle_position_%s' % d for d in 'xyz']
+    pfields += ['particle_velocity_%s' % d for d in 'xyz']
+
+    acc_part_fields = \
+        [('AccretingParticle', pf) for pf in ['AccretionRate'] + pfields]
+
+    real_acc_part_fields = sorted(
+        [f for f in two_sph.field_list if f[0] == 'AccretingParticle'])
+    assert_equal(acc_part_fields, real_acc_part_fields)
+
+
+    apcos = data_dir_load(active_particle_cosmology)
+    assert_equal(['CenOstriker', 'DarkMatter'], apcos.particle_types_raw)
+    assert 'all' in apcos.particle_unions
+
+    apcos_fields = [('CenOstriker', pf) for pf in pfields]
+
+    real_apcos_fields = sorted(
+        [f for f in apcos.field_list if f[0] == 'CenOstriker'])
+
+    assert_equal(apcos_fields, real_apcos_fields)

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -335,9 +335,10 @@
           dict([(ptype, False)
                 for ptype, pnum in self.particle_count.items()
                 if pnum > 0])
+        has_ids = False
 
         for data_file in self.data_files:
-            fl, sl, _units = self.io._identify_fields(data_file)
+            fl, sl, idl, _units = self.io._identify_fields(data_file)
             units.update(_units)
             field_list.extend([f for f in fl
                                if f not in field_list])
@@ -345,7 +346,8 @@
                                       if f not in scalar_field_list])
             for ptype in found_fields:
                 found_fields[ptype] |= data_file.total_particles[ptype]
-            if all(found_fields.values()): break
+            has_ids |= len(idl) > 0
+            if all(found_fields.values()) and has_ids: break
 
         self.field_list = field_list
         self.scalar_field_list = scalar_field_list

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -258,6 +258,7 @@
             start_index = dobj.field_data_start[i]
             end_index = dobj.field_data_end[i]
             pcount = end_index - start_index
+            if pcount == 0: continue
             field_end = field_start + end_index - start_index
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(member_fields.items()):
@@ -298,11 +299,9 @@
     def _identify_fields(self, data_file):
         fields = []
         scalar_fields = []
-        pcount = data_file.total_particles
-        if sum(pcount.values()) == 0: return fields, scalar_fields, {}
+        id_fields = {}
         with h5py.File(data_file.filename, "r") as f:
             for ptype in self.ds.particle_types_raw:
-                if data_file.total_particles[ptype] == 0: continue
                 fields.append((ptype, "particle_identifier"))
                 scalar_fields.append((ptype, "particle_identifier"))
                 my_fields, my_offset_fields = \
@@ -311,8 +310,9 @@
                 scalar_fields.extend(my_fields)
 
                 if "IDs" not in f: continue
-                fields.extend([(ptype, field) for field in f["IDs"]])
-        return fields, scalar_fields, {}
+                id_fields = [(ptype, field) for field in f["IDs"]]
+                fields.extend(id_fields)
+        return fields, scalar_fields, id_fields, {}
 
 def subfind_field_list(fh, ptype, pcount):
     fields = []

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -86,3 +86,29 @@
         # as the array of all masses.  This will test getting
         # scalar fields for halos correctly.
         yield assert_array_equal, ad[ptype, "particle_mass"], mass
+
+# fof/subhalo catalog with no member ids in first file
+g56 = "gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5"
+
+# This dataset has halos in one file and ids in another,
+# which can confuse the field detection.
+@requires_file(g56)
+def test_unbalanced_dataset():
+    ds = data_dir_load(g56)
+    halo = ds.halo("Group", 0)
+    halo["member_ids"]
+    assert True
+
+# fof/subhalo catalog with no member ids in first file
+g76 = "gadget_halos/data/groups_076/fof_subhalo_tab_076.0.hdf5"
+
+# This dataset has one halo with particles distributed over 3 files
+# with the 2nd file being empty.
+@requires_file(g76)
+def test_3file_halo():
+    ds = data_dir_load(g76)
+    # this halo's particles are distributed over 3 files with the
+    # middle file being empty
+    halo = ds.halo("Group", 6)
+    halo["member_ids"]
+    assert True

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -138,5 +138,5 @@
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.float64_t ppos[3], np.float64_t[:] fields,
                       np.int64_t domain_ind)

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -19,9 +19,22 @@
 from libc.stdlib cimport malloc, free
 cimport cython
 from libc.math cimport sqrt
+from cpython cimport PyObject
+from yt.utilities.lib.fp_utils cimport *
 
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
+from cpython.array cimport array, clone
+from cython.view cimport memoryview as cymemview
+from yt.utilities.lib.misc_utilities import OnceIndirect
+
+cdef append_axes(np.ndarray arr, int naxes):
+    if arr.ndim == naxes:
+        return arr
+    # Avoid copies
+    arr2 = arr.view()
+    arr2.shape = arr2.shape + (1,) * (naxes - arr2.ndim)
+    return arr2
 
 cdef class ParticleDepositOperation:
     def __init__(self, nvals, kernel_name):
@@ -46,15 +59,10 @@
         if fields is None:
             fields = []
         nf = len(fields)
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
         cdef np.float64_t pos[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = (1 << octree.oref)
         cdef int nz = dims[0] * dims[1] * dims[2]
@@ -66,7 +74,7 @@
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
             for j in range(nf):
-                field_vals[j] = field_pointers[j][i]
+                field_vals[j] = field_pointers[j,i]
             for j in range(3):
                 pos[j] = positions[i, j]
             # This line should be modified to have it return the index into an
@@ -89,7 +97,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * nz
+            offset = dom_ind[oct.domain_ind - moff]
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
@@ -107,16 +115,11 @@
         if fields is None:
             fields = []
         nf = len(fields)
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
         cdef np.float64_t pos[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
         cdef np.int64_t gid = getattr(gobj, "id", -1)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
         cdef np.float64_t dds[3]
         cdef np.float64_t left_edge[3]
         cdef int dims[3]
@@ -127,7 +130,7 @@
         for i in range(positions.shape[0]):
             # Now we process
             for j in range(nf):
-                field_vals[j] = field_pointers[j][i]
+                field_vals[j] = field_pointers[j,i]
             for j in range(3):
                 pos[j] = positions[i, j]
             self.process(dims, left_edge, dds, 0, pos, field_vals, gid)
@@ -137,19 +140,16 @@
 
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.float64_t ppos[3], np.float64_t[:] fields,
                       np.int64_t domain_ind):
         raise NotImplementedError
 
 cdef class CountParticles(ParticleDepositOperation):
-    cdef np.int64_t *count # float, for ease
-    cdef public object ocount
+    cdef np.int64_t[:,:,:,:] count
     def initialize(self):
         # Create a numpy array accessible to python
-        self.ocount = np.zeros(self.nvals, dtype="int64", order='F')
-        cdef np.ndarray arr = self.ocount
-        # alias the C-view for use in cython
-        self.count = <np.int64_t*> arr.data
+        self.count = append_axes(
+            np.zeros(self.nvals, dtype="int64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -157,7 +157,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         # here we do our thing; this is the kernel
@@ -165,10 +165,12 @@
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        self.count[gind(ii[0], ii[1], ii[2], dim) + offset] += 1
+        self.count[ii[2], ii[1], ii[0], offset] += 1
 
     def finalize(self):
-        return self.ocount.astype('f8')
+        arr = np.asarray(self.count)
+        arr.shape = self.nvals
+        return arr.astype("float64")
 
 deposit_count = CountParticles
 
@@ -176,18 +178,14 @@
     # Note that this does nothing at the edges.  So it will give a poor
     # estimate there, and since Octrees are mostly edges, this will be a very
     # poor SPH kernel.
-    cdef np.float64_t *data
-    cdef public object odata
-    cdef np.float64_t *temp
-    cdef public object otemp
+    cdef np.float64_t[:,:,:,:] data
+    cdef np.float64_t[:,:,:,:] temp
 
     def initialize(self):
-        self.odata = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.odata
-        self.data = <np.float64_t*> arr.data
-        self.otemp = np.zeros(self.nvals, dtype="float64", order='F')
-        arr = self.otemp
-        self.temp = <np.float64_t*> arr.data
+        self.data = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.temp = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -195,7 +193,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
@@ -227,14 +225,14 @@
                     dist = idist[0] + idist[1] + idist[2]
                     # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / fields[0]
-                    self.temp[gind(i,j,k,dim) + offset] = self.sph_kernel(dist)
-                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+                    self.temp[k,j,i,offset] = self.sph_kernel(dist)
+                    kernel_sum += self.temp[k,j,i,offset]
         # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:
-                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
-                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+                    dist = self.temp[k,j,i,offset] / kernel_sum
+                    self.data[k,j,i,offset] += fields[1] * dist
 
     def finalize(self):
         return self.odata
@@ -242,12 +240,10 @@
 deposit_simple_smooth = SimpleSmooth
 
 cdef class SumParticleField(ParticleDepositOperation):
-    cdef np.float64_t *sum
-    cdef public object osum
+    cdef np.float64_t[:,:,:,:] sum
     def initialize(self):
-        self.osum = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.osum
-        self.sum = <np.float64_t*> arr.data
+        self.sum = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -255,18 +251,20 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
-        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0]
+        self.sum[ii[2], ii[1], ii[0], offset] += fields[0]
         return
 
     def finalize(self):
-        return self.osum
+        sum = np.asarray(self.sum)
+        sum.shape = self.nvals
+        return sum
 
 deposit_sum = SumParticleField
 
@@ -274,28 +272,20 @@
     # Thanks to Britton and MJ Turk for the link
     # to a single-pass STD
     # http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
-    cdef np.float64_t *mk
-    cdef np.float64_t *qk
-    cdef np.float64_t *i
-    cdef public object omk
-    cdef public object oqk
-    cdef public object oi
+    cdef np.float64_t[:,:,:,:] mk
+    cdef np.float64_t[:,:,:,:] qk
+    cdef np.float64_t[:,:,:,:] i
     def initialize(self):
         # we do this in a single pass, but need two scalar
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray omkarr= self.omk
-        self.mk= <np.float64_t*> omkarr.data
-        # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray oqkarr= self.oqk
-        self.qk= <np.float64_t*> oqkarr.data
-        # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray oiarr = self.oi
-        self.i = <np.float64_t*> oiarr.data
+        self.mk = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.qk = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.i = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -303,7 +293,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
@@ -311,35 +301,36 @@
         cdef float k, mk, qk
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        cell_index = gind(ii[0], ii[1], ii[2], dim) + offset
-        k = self.i[cell_index]
-        mk = self.mk[cell_index]
-        qk = self.qk[cell_index]
+        k = self.i[ii[2], ii[1], ii[0], offset]
+        mk = self.mk[ii[2], ii[1], ii[0], offset]
+        qk = self.qk[ii[2], ii[1], ii[0], offset]
         #print k, mk, qk, cell_index
         if k == 0.0:
             # Initialize cell values
-            self.mk[cell_index] = fields[0]
+            self.mk[ii[2], ii[1], ii[0], offset] = fields[0]
         else:
-            self.mk[cell_index] = mk + (fields[0] - mk) / k
-            self.qk[cell_index] = qk + (k - 1.0) * (fields[0] - mk)**2.0 / k
-        self.i[cell_index] += 1
+            self.mk[ii[2], ii[1], ii[0], offset] = mk + (fields[0] - mk) / k
+            self.qk[ii[2], ii[1], ii[0], offset] = \
+                qk + (k - 1.0) * (fields[0] - mk)**2.0 / k
+        self.i[ii[2], ii[1], ii[0], offset] += 1
 
     def finalize(self):
         # This is the standard variance
         # if we want sample variance divide by (self.oi - 1.0)
-        std2 = self.oqk / self.oi
-        std2[self.oi == 0.0] = 0.0
+        i = np.asarray(self.i)
+        std2 = np.asarray(self.qk) / i
+        std2[i == 0.0] = 0.0
+        std2.shape = self.nvals
         return np.sqrt(std2)
 
 deposit_std = StdParticleField
 
 cdef class CICDeposit(ParticleDepositOperation):
-    cdef np.float64_t *field
+    cdef np.float64_t[:,:,:,:] field
     cdef public object ofield
     def initialize(self):
-        self.ofield = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.ofield
-        self.field = <np.float64_t *> arr.data
+        self.field = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -347,7 +338,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
 
@@ -372,29 +363,26 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    ii = gind(ind[0] - i, ind[1] - j, ind[2] - k, dim) + offset
-                    self.field[ii] += fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
+                    self.field[ind[2] - k, ind[1] - j, ind[0] - i, offset] += \
+                        fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
 
     def finalize(self):
-        return self.ofield
+        rv = np.asarray(self.field)
+        rv.shape = self.nvals
+        return rv
 
 deposit_cic = CICDeposit
 
 cdef class WeightedMeanParticleField(ParticleDepositOperation):
     # Deposit both mass * field and mass into two scalars
     # then in finalize divide mass * field / mass
-    cdef np.float64_t *wf
-    cdef public object owf
-    cdef np.float64_t *w
-    cdef public object ow
+    cdef np.float64_t[:,:,:,:] wf
+    cdef np.float64_t[:,:,:,:] w
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
-        cdef np.ndarray wfarr = self.owf
-        self.wf = <np.float64_t*> wfarr.data
-
-        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
-        cdef np.ndarray warr = self.ow
-        self.w = <np.float64_t*> warr.data
+        self.wf = append_axes(
+            np.zeros(self.nvals, dtype='float64', order='F'), 4)
+        self.w = append_axes(
+            np.zeros(self.nvals, dtype='float64', order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -402,18 +390,22 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
-        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
-        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        self.w[ii[2], ii[1], ii[0], offset] += fields[1]
+        self.wf[ii[2], ii[1], ii[0], offset] += fields[0] * fields[1]
 
     def finalize(self):
-        return self.owf / self.ow
+        wf = np.asarray(self.wf)
+        w = np.asarray(self.w)
+        rv = wf / w
+        rv.shape = self.nvals
+        return rv
 
 deposit_weighted_mean = WeightedMeanParticleField
 
@@ -430,7 +422,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         fields[0] = domain_ind
@@ -441,19 +433,14 @@
 deposit_mesh_id = MeshIdentifier
 
 cdef class NNParticleField(ParticleDepositOperation):
-    cdef np.float64_t *nnfield
-    cdef np.float64_t *distfield
-    cdef public object onnfield
-    cdef public object odistfield
+    cdef np.float64_t[:,:,:,:] nnfield
+    cdef np.float64_t[:,:,:,:] distfield
     def initialize(self):
-        self.onnfield = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.onnfield
-        self.nnfield = <np.float64_t*> arr.data
-
-        self.odistfield = np.zeros(self.nvals, dtype="float64", order='F')
-        self.odistfield[:] = np.inf
-        arr = self.odistfield
-        self.distfield = <np.float64_t*> arr.data
+        self.nnfield = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.distfield = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.distfield[:] = np.inf
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -461,14 +448,13 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         # This one is a bit slow.  Every grid cell is going to be iterated
         # over, and we're going to deposit particles in it.
         cdef int i, j, k
         cdef int ii[3]
-        cdef np.int64_t ggind
         cdef np.float64_t r2
         cdef np.float64_t gpos[3]
         gpos[0] = left_edge[0] + 0.5 * dds[0]
@@ -477,19 +463,20 @@
             for j in range(dim[1]):
                 gpos[2] = left_edge[2] + 0.5 * dds[2]
                 for k in range(dim[2]):
-                    ggind = gind(i, j, k, dim) + offset
                     r2 = ((ppos[0] - gpos[0])*(ppos[0] - gpos[0]) +
                           (ppos[1] - gpos[1])*(ppos[1] - gpos[1]) +
                           (ppos[2] - gpos[2])*(ppos[2] - gpos[2]))
-                    if r2 < self.distfield[ggind]:
-                        self.distfield[ggind] = r2
-                        self.nnfield[ggind] = fields[0]
+                    if r2 < self.distfield[k,j,i,offset]:
+                        self.distfield[k,j,i,offset] = r2
+                        self.nnfield[k,j,i,offset] = fields[0]
                     gpos[2] += dds[2]
                 gpos[1] += dds[1]
             gpos[0] += dds[0]
         return
 
     def finalize(self):
-        return self.onnfield
+        nn = np.asarray(self.nnfield)
+        nn.shape = self.nvals
+        return nn
 
 deposit_nearest = NNParticleField

diff -r 7e69492b6a4d54fb151eb1d2550e978570e9849a -r 345bc90272fd562906541619d951b0d7a05045c5 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import operator
 import numpy as np
 
 from yt.funcs import mylog
@@ -42,19 +43,28 @@
 steps = np.array([[-1, -1, -1], [-1, -1,  0], [-1, -1,  1],
                   [-1,  0, -1], [-1,  0,  0], [-1,  0,  1],
                   [-1,  1, -1], [-1,  1,  0], [-1,  1,  1],
-                  
+
                   [ 0, -1, -1], [ 0, -1,  0], [ 0, -1,  1],
                   [ 0,  0, -1],
                   # [ 0,  0,  0],
                   [ 0,  0,  1],
                   [ 0,  1, -1], [ 0,  1,  0], [ 0,  1,  1],
-                  
+
                   [ 1, -1, -1], [ 1, -1,  0], [ 1, -1,  1],
                   [ 1,  0, -1], [ 1,  0,  0], [ 1,  0,  1],
                   [ 1,  1, -1], [ 1,  1,  0], [ 1,  1,  1] ])
 
+def _apply_log(data, log_changed, log_new):
+    '''Helper used to set log10/10^ to data in AMRKDTree'''
+    if not log_changed:
+        return
+    if log_new:
+        np.log10(data, data)
+    else:
+        np.power(10.0, data, data)
+
 class Tree(object):
-    def __init__(self, ds, comm_rank=0, comm_size=1, left=None, right=None, 
+    def __init__(self, ds, comm_rank=0, comm_size=1, left=None, right=None,
         min_level=None, max_level=None, data_source=None):
 
         self.ds = ds
@@ -155,6 +165,7 @@
         self.brick_dimensions = []
         self.sdx = ds.index.get_smallest_dx()
 
+        self.regenerate_data = True
         self._initialized = False
         try:
             self._id_offset = ds.index.grids[0]._id_offset
@@ -171,13 +182,25 @@
                          data_source=data_source)
 
     def set_fields(self, fields, log_fields, no_ghost):
-        self.fields = self.data_source._determine_fields(fields)
+        new_fields = self.data_source._determine_fields(fields)
+        self.regenerate_data = \
+            self.fields is None or \
+            len(self.fields) != len(new_fields) or \
+            self.fields != new_fields
+        self.fields = new_fields
+
+        if self.log_fields is not None:
+            flip_log = map(operator.ne, self.log_fields, log_fields)
+        else:
+            flip_log = [False] * len(log_fields)
         self.log_fields = log_fields
+
         self.no_ghost = no_ghost
         del self.bricks, self.brick_dimensions
         self.brick_dimensions = []
         bricks = []
         for b in self.traverse():
+            map(_apply_log, b.my_data, flip_log, log_fields)
             bricks.append(b)
         self.bricks = np.array(bricks)
         self.brick_dimensions = np.array(self.brick_dimensions)
@@ -261,7 +284,8 @@
         return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
-        if node.data is not None: return node.data
+        if node.data is not None and not self.regenerate_data:
+            return node.data
         grid = self.ds.index.grids[node.grid - self._id_offset]
         dds = grid.dds.ndarray_view()
         gle = grid.LeftEdge.ndarray_view()
@@ -273,7 +297,7 @@
         assert(np.all(grid.LeftEdge <= nle))
         assert(np.all(grid.RightEdge >= nre))
 
-        if grid in self.current_saved_grids:
+        if grid in self.current_saved_grids and not self.regenerate_data:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
@@ -301,6 +325,7 @@
         node.data = brick
         if not self._initialized:
             self.brick_dimensions.append(dims)
+        self.regenerate_data = False
         return brick
 
     def locate_brick(self, position):
@@ -327,16 +352,16 @@
         cis: List of neighbor cell index tuples
 
         Both of these are neighbors that, relative to the current cell
-        index (i,j,k), are ordered as: 
-        
-        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...  
-        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...  
+        index (i,j,k), are ordered as:
+
+        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...
+        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...
         (i+1, j+1, k-1), (i-1, j-1, k ), (i+1, j+1, k+1)
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
         center cell (i,j,k) is omitted.
-        
+
         """
         ci = np.array(ci)
         center_dds = grid.dds
@@ -351,7 +376,7 @@
         new_positions = position + steps*offs
         new_positions = [periodic_position(p, self.ds) for p in new_positions]
         grids[in_grid] = grid
-                
+
         get_them = np.argwhere(in_grid).ravel()
         cis[in_grid] = new_cis[in_grid]
 
@@ -367,11 +392,11 @@
         return grids, cis
 
     def locate_neighbors_from_position(self, position):
-        r"""Given a position, finds the 26 neighbor grids 
+        r"""Given a position, finds the 26 neighbor grids
         and cell indices.
 
         This is a mostly a wrapper for locate_neighbors.
-        
+
         Parameters
         ----------
         position: array-like
@@ -383,16 +408,16 @@
         cis: List of neighbor cell index tuples
 
         Both of these are neighbors that, relative to the current cell
-        index (i,j,k), are ordered as: 
-        
-        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...  
-        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...  
+        index (i,j,k), are ordered as:
+
+        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...
+        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...
         (i+1, j+1, k-1), (i-1, j-1, k ), (i+1, j+1, k+1)
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
         center cell (i,j,k) is omitted.
-        
+
         """
         position = np.array(position)
         grid = self.ds.index.grids[self.locate_brick(position).grid -
@@ -421,7 +446,7 @@
         del f
         if self.comm.rank != (self.comm.size-1):
             self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
-        
+
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.ds
@@ -435,10 +460,10 @@
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
-                                                 node.l_corner.copy(), 
-                                                 node.r_corner.copy(), 
+                                                 node.l_corner.copy(),
+                                                 node.r_corner.copy(),
                                                  node.dims.astype('int64'))
-                    
+
                     self.bricks.append(node.data)
                     self.brick_dimensions.append(node.dims)
 
@@ -457,15 +482,15 @@
         if self.comm.size == 0: return
         nid, pid, lid, rid, les, res, gid, splitdims, splitposs = \
                 self.get_node_arrays()
-        nid = self.comm.par_combine_object(nid, 'cat', 'list') 
-        pid = self.comm.par_combine_object(pid, 'cat', 'list') 
-        lid = self.comm.par_combine_object(lid, 'cat', 'list') 
-        rid = self.comm.par_combine_object(rid, 'cat', 'list') 
-        gid = self.comm.par_combine_object(gid, 'cat', 'list') 
-        les = self.comm.par_combine_object(les, 'cat', 'list') 
-        res = self.comm.par_combine_object(res, 'cat', 'list') 
-        splitdims = self.comm.par_combine_object(splitdims, 'cat', 'list') 
-        splitposs = self.comm.par_combine_object(splitposs, 'cat', 'list') 
+        nid = self.comm.par_combine_object(nid, 'cat', 'list')
+        pid = self.comm.par_combine_object(pid, 'cat', 'list')
+        lid = self.comm.par_combine_object(lid, 'cat', 'list')
+        rid = self.comm.par_combine_object(rid, 'cat', 'list')
+        gid = self.comm.par_combine_object(gid, 'cat', 'list')
+        les = self.comm.par_combine_object(les, 'cat', 'list')
+        res = self.comm.par_combine_object(res, 'cat', 'list')
+        splitdims = self.comm.par_combine_object(splitdims, 'cat', 'list')
+        splitposs = self.comm.par_combine_object(splitposs, 'cat', 'list')
         nid = np.array(nid)
         self.rebuild_tree_from_array(nid, pid, lid,
             rid, les, res, gid, splitdims, splitposs)
@@ -481,25 +506,25 @@
         splitdims = []
         splitposs = []
         for node in depth_first_touch(self.tree.trunk):
-            nids.append(node.node_id) 
-            les.append(node.get_left_edge()) 
-            res.append(node.get_right_edge()) 
+            nids.append(node.node_id)
+            les.append(node.get_left_edge())
+            res.append(node.get_right_edge())
             if node.left is None:
-                leftids.append(-1) 
+                leftids.append(-1)
             else:
-                leftids.append(node.left.node_id) 
+                leftids.append(node.left.node_id)
             if node.right is None:
-                rightids.append(-1) 
+                rightids.append(-1)
             else:
-                rightids.append(node.right.node_id) 
+                rightids.append(node.right.node_id)
             if node.parent is None:
-                parentids.append(-1) 
+                parentids.append(-1)
             else:
-                parentids.append(node.parent.node_id) 
+                parentids.append(node.parent.node_id)
             if node.grid is None:
-                gridids.append(-1) 
+                gridids.append(-1)
             else:
-                gridids.append(node.grid) 
+                gridids.append(node.grid)
             splitdims.append(node.get_split_dim())
             splitposs.append(node.get_split_pos())
 
@@ -510,10 +535,10 @@
                                rids, les, res, gids, splitdims, splitposs):
         del self.tree.trunk
 
-        self.tree.trunk = Node(None, 
+        self.tree.trunk = Node(None,
                     None,
                     None,
-                    les[0], res[0], gids[0], nids[0]) 
+                    les[0], res[0], gids[0], nids[0])
 
         N = nids.shape[0]
         for i in range(N):
@@ -521,14 +546,14 @@
             n.set_left_edge(les[i])
             n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, 
-                              np.zeros(3, dtype='float64'),  
-                              np.zeros(3, dtype='float64'),  
+                n.left = Node(n, None, None,
+                              np.zeros(3, dtype='float64'),
+                              np.zeros(3, dtype='float64'),
                               -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, 
-                               np.zeros(3, dtype='float64'),  
-                               np.zeros(3, dtype='float64'),  
+                n.right = Node(n, None, None,
+                               np.zeros(3, dtype='float64'),
+                               np.zeros(3, dtype='float64'),
                                -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
@@ -541,9 +566,9 @@
 
     def count_volume(self):
         return kd_sum_volume(self.tree.trunk)
-    
+
     def count_cells(self):
-        return self.tree.sum_cells() 
+        return self.tree.sum_cells()
 
 if __name__ == "__main__":
     import yt

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/c002a7682193/
Changeset:   c002a7682193
Branch:      yt
User:        jzuhone
Date:        2016-03-02 21:42:20+00:00
Summary:     Docstrings here
Affected #:  1 file

diff -r 345bc90272fd562906541619d951b0d7a05045c5 -r c002a76821932fcc44714f4dd9556a3e3d0c9d7b yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -17,6 +17,10 @@
 from yt.utilities import physical_constants as pc
 
 class UnitSystemConstants(object):
+    """
+    A class to facilitate conversions of physical constants into a given unit
+    system specified by *name*.
+    """
     def __init__(self, name):
         self.name = name
 
@@ -30,14 +34,34 @@
         return getattr(pc, item).in_base(self.name)
 
 class UnitSystem(object):
+    """
+    Create a UnitSystem for facilitating conversions to a default set of units.
+
+    Parameters
+    ----------
+    name : string
+        The name of the unit system. Will be used as the key in the *unit_system_registry*
+        dict to reference the unit system by.
+    length_unit : string
+        The base length unit of this unit system.
+    mass_unit : string
+        The base mass unit of this unit system.
+    time_unit : string
+        The base time unit of this unit system.
+    temperature_unit : string, optional
+        The base temperature unit of this unit system. Defaults to "K".
+    angle_unit : string, optional
+        The base angle unit of this unit system. Defaults to "rad".
+    current_mks_unit : string, optional
+        The base current unit of this unit system. Only used in MKS or MKS-based unit systems.
+    registry : :class:`yt.units.unit_registry.UnitRegistry` object
+        The unit registry associated with this unit system. Only useful for defining unit
+        systems based on code units.
+    """
     def __init__(self, name, length_unit, mass_unit, time_unit,
-                 temperature_unit=None, angle_unit=None, current_mks_unit=None,
+                 temperature_unit="K", angle_unit="rad", current_mks_unit=None,
                  registry=None):
         self.registry = registry
-        if temperature_unit is None:
-            temperature_unit = "K"
-        if angle_unit is None:
-            angle_unit = "rad"
         self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry),
                           dimensions.mass: Unit(mass_unit, registry=self.registry),
                           dimensions.time: Unit(time_unit, registry=self.registry),


https://bitbucket.org/yt_analysis/yt/commits/f6a19ed992ae/
Changeset:   f6a19ed992ae
Branch:      yt
User:        jzuhone
Date:        2016-03-02 21:42:45+00:00
Summary:     Adding another quick example
Affected #:  1 file

diff -r c002a76821932fcc44714f4dd9556a3e3d0c9d7b -r f6a19ed992aeaa32bdc36217c115ebaa187d86ba doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -313,6 +313,25 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.utilities.physical_constants import G\n",
+    "print (G.in_base(\"mks\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "### Defining Your Own Unit System"
    ]
   },

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list