[yt-svn] commit/yt: 49 new changesets

commits-noreply at bitbucket.org
Fri Oct 24 11:26:45 PDT 2014


49 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/42f9fe514aa2/
Changeset:   42f9fe514aa2
Branch:      yt
User:        jzuhone
Date:        2014-09-23 03:49:44+00:00
Summary:     First steps toward overriding code units.
Affected #:  1 file

diff -r aeef5129527cdc239f3926c26dfa50ec6592505e -r 42f9fe514aa27b475e70d3383447aebefae266c4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -147,7 +147,7 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -162,6 +162,10 @@
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}
+        if units_override is None:
+            self.units_override = {}
+        else:
+            self.units_override = units_override
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -674,6 +678,8 @@
 
     def set_code_units(self):
         self._set_code_unit_attributes()
+        # here we override units, if an override has been provided.
+        self._override_code_units()
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -686,6 +692,14 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
                                    DW.units.dimensions)
 
+    def _override_code_units(self):
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            val = self.units_override.get("%s_unit" % unit, None)
+            if val is not None:
+                if not isinstance(val, tuple):
+                    val = (val, cgs)
+                setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
+
     _arr = None
     @property
     def arr(self):

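A minimal usage sketch of what this first cut enables, assuming a frontend that forwards units_override to the base class (the filename below is a placeholder): a bare value is paired with the cgs default from the loop above, while a tuple carries an explicit (value, unit) pair.

    import yt

    ds = yt.load("data0001",   # placeholder dataset
                 units_override={"length_unit": (1.0, "Mpc"),
                                 "time_unit": 3.1557e13,   # bare value -> seconds
                                 "mass_unit": (1.0e14, "Msun")})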

https://bitbucket.org/yt_analysis/yt/commits/b32eaa3759bc/
Changeset:   b32eaa3759bc
Branch:      yt
User:        jzuhone
Date:        2014-09-23 04:07:58+00:00
Summary:     Adding more unit checks of the units_override dict, and some logging.
Affected #:  1 file

diff -r 42f9fe514aa27b475e70d3383447aebefae266c4 -r b32eaa3759bc588bdbebece4dd1dbe88fcaa58f9 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -162,10 +162,7 @@
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}
-        if units_override is None:
-            self.units_override = {}
-        else:
-            self.units_override = units_override
+        self.units_override = units_override
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -693,11 +690,19 @@
                                    DW.units.dimensions)
 
     def _override_code_units(self):
-        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+        if self.units_override is None:
+            return
+        else:
+            mylog.warning("Overriding code units. This is an experimental and potentially "+
+                          "dangerous option that may yield inconsistent results, and must be used "+
+                          "very carefully.")
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
+                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
                 if not isinstance(val, tuple):
                     val = (val, cgs)
+                mylog.info("Overriding %s_unit with %s %s.", unit, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None

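With this changeset the override loop recognizes six keys rather than three. A sketch of a full override dict under the cgs defaults listed in the diff (the values are illustrative):

    units_override = {"length_unit": (1.0, "Mpc"),
                      "time_unit": (1.0, "Myr"),
                      "mass_unit": (1.0e14, "Msun"),
                      "velocity_unit": (1.0e8, "cm/s"),
                      "magnetic_unit": (1.0, "gauss"),
                      "temperature_unit": 1.0}    # bare value -> "K"

Any key left out is simply not overridden, and passing units_override=None (the default) skips the machinery entirely.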

https://bitbucket.org/yt_analysis/yt/commits/922035fe1422/
Changeset:   922035fe1422
Branch:      yt
User:        jzuhone
Date:        2014-09-23 04:08:37+00:00
Summary:     Attempting to fold the old functionality of the parameters dict into units_override for Athena data.
Affected #:  1 file

diff -r b32eaa3759bc588bdbebece4dd1dbe88fcaa58f9 -r 922035fe14220d7b1ebdeff883297bbe6de39cbc yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -352,12 +352,19 @@
     _dataset_type = "athena"
 
     def __init__(self, filename, dataset_type='athena',
-                 storage_filename=None, parameters=None):
+                 storage_filename=None, parameters=None,
+                 units_override=None):
         self.fluid_types += ("athena",)
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters
-        Dataset.__init__(self, filename, dataset_type)
+        if units_override is None:
+            units_override = {}
+        # This is for backwards-compatibility
+        for k,v in self.specified_parameters.items():
+            if k.endswith("_unit") and k not in self.units_override:
+                self.units_override[k] = self.specified_parameters.pop(k)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]
@@ -373,7 +380,7 @@
         Generates the conversion to various physical _units based on the parameter file
         """
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            val = self.specified_parameters.get("%s_unit" % unit, None)
+            val = self.units_override.get("%s_unit" % unit, None)
             if val is None:
                 if unit == "length": self.no_cgs_equiv_length = True
                 mylog.warning("No %s conversion to cgs provided.  " +

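The two load styles this changeset reconciles, side by side (the filename is a placeholder; both should yield the same unit setup once the _unit keys are migrated out of parameters):

    import yt

    # Old style: unit conversions buried in the parameters dict
    ds_old = yt.load("id0/data.0000.vtk",
                     parameters={"length_unit": (1.0, "Mpc")})

    # New style: conversions passed explicitly
    ds_new = yt.load("id0/data.0000.vtk",
                     units_override={"length_unit": (1.0, "Mpc")})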

https://bitbucket.org/yt_analysis/yt/commits/ae929f4f6632/
Changeset:   ae929f4f6632
Branch:      yt
User:        jzuhone
Date:        2014-09-23 12:59:55+00:00
Summary:     Implementing units_override in FLASH
Affected #:  1 file

diff -r 922035fe14220d7b1ebdeff883297bbe6de39cbc -r ae929f4f66326e656d1203a215e7a8abe8d1a10a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -191,14 +191,12 @@
     def __init__(self, filename, dataset_type='flash_hdf5',
                  storage_filename = None,
                  particle_filename = None, 
-                 conversion_override = None):
+                 units_override = None):
 
         self.fluid_types += ("flash",)
         if self._handle is not None: return
         self._handle = HDF5FileHandler(filename)
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-        
+
         self.particle_filename = particle_filename
 
         if self.particle_filename is None :
@@ -213,7 +211,7 @@
         # generalization.
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
         self.parameters["HydroMethod"] = 'flash' # always PPM DE


https://bitbucket.org/yt_analysis/yt/commits/f3241715c2c2/
Changeset:   f3241715c2c2
Branch:      yt
User:        jzuhone
Date:        2014-09-24 13:51:19+00:00
Summary:     In case we override length_unit, time_unit, or velocity_unit, make sure the attribute gets set
Affected #:  1 file

diff -r ae929f4f66326e656d1203a215e7a8abe8d1a10a -r f3241715c2c2f2e993d11fbfd9e37d94a39d6626 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -704,6 +704,7 @@
                     val = (val, cgs)
                 mylog.info("Overriding %s_unit with %s %s.", unit, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
+        self.velocity_unit = getattr(self, "velocity_unit", self.length_unit/self.time_unit)
 
     _arr = None
     @property


https://bitbucket.org/yt_analysis/yt/commits/8c8ebf5bb147/
Changeset:   8c8ebf5bb147
Branch:      yt
User:        jzuhone
Date:        2014-09-24 13:51:39+00:00
Summary:     More cleaning up of the Athena frontend to make it work with units_override
Affected #:  1 file

diff -r f3241715c2c2f2e993d11fbfd9e37d94a39d6626 -r 8c8ebf5bb1473f51672f2a8603a8825a8bb424bc yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -385,17 +385,13 @@
                 if unit == "length": self.no_cgs_equiv_length = True
                 mylog.warning("No %s conversion to cgs provided.  " +
                               "Assuming 1.0 = 1.0 %s", unit, cgs)
-                val = 1.0
-            if not isinstance(val, tuple):
-                val = (val, cgs)
-            setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
-        self.velocity_unit = self.length_unit/self.time_unit
-        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                  (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
+                setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
+        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                     (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):

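For reference, the Gaussian-units relation the Athena frontend computes here is B_unit = sqrt(4 pi mass_unit / (time_unit**2 length_unit)). A standalone numerical check with plain NumPy, using unity cgs code units (no yt objects involved):

    import numpy as np

    mass_unit = 1.0    # g
    time_unit = 1.0    # s
    length_unit = 1.0  # cm

    # 1 gauss = 1 g**0.5 cm**-0.5 s**-1 in Gaussian cgs, so unity code
    # units give sqrt(4*pi) gauss:
    b_unit = np.sqrt(4 * np.pi * mass_unit / (time_unit**2 * length_unit))
    print(b_unit)  # ~3.5449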

https://bitbucket.org/yt_analysis/yt/commits/1552ff868ac9/
Changeset:   1552ff868ac9
Branch:      yt
User:        jzuhone
Date:        2014-09-24 13:51:59+00:00
Summary:     Support units_override in the FITS frontend and clean up PEP8-isms
Affected #:  1 file

diff -r 8c8ebf5bb1473f51672f2a8603a8825a8bb424bc -r 1552ff868ac9ea762f213a2d1fdbb0acc53d240e yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -311,20 +311,23 @@
     _handle = None
 
     def __init__(self, filename,
-                 dataset_type = 'fits',
-                 auxiliary_files = [],
-                 nprocs = None,
-                 storage_filename = None,
-                 nan_mask = None,
-                 spectral_factor = 1.0,
-                 z_axis_decomp = False,
-                 line_database = None,
-                 line_width = None,
-                 suppress_astropy_warnings = True,
-                 parameters = None):
+                 dataset_type='fits',
+                 auxiliary_files=[],
+                 nprocs=None,
+                 storage_filename=None,
+                 nan_mask=None,
+                 spectral_factor=1.0,
+                 z_axis_decomp=False,
+                 line_database=None,
+                 line_width=None,
+                 suppress_astropy_warnings=True,
+                 parameters=None,
+                 units_override=None):
 
         if parameters is None:
             parameters = {}
+        if units_override is None:
+            units_override = {}
         parameters["nprocs"] = nprocs
         self.specified_parameters = parameters
 
@@ -432,7 +435,7 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, fn, dataset_type)
+        Dataset.__init__(self, fn, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):


https://bitbucket.org/yt_analysis/yt/commits/644f7e587d88/
Changeset:   644f7e587d88
Branch:      yt
User:        jzuhone
Date:        2014-09-24 22:59:33+00:00
Summary:     Changing the way units_override defaults are handled: fall back to an empty dict in the Dataset base class rather than in each frontend
Affected #:  2 files

diff -r 1552ff868ac9ea762f213a2d1fdbb0acc53d240e -r 644f7e587d8814964e2ebc621df0214fa48ab071 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -162,6 +162,8 @@
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}
+        if units_override is None:
+            units_override = {}
         self.units_override = units_override
 
         # path stuff
@@ -690,7 +692,7 @@
                                    DW.units.dimensions)
 
     def _override_code_units(self):
-        if self.units_override is None:
+        if len(self.units_override) == 0:
             return
         else:
             mylog.warning("Overriding code units. This is an experimental and potentially "+

diff -r 1552ff868ac9ea762f213a2d1fdbb0acc53d240e -r 644f7e587d8814964e2ebc621df0214fa48ab071 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -326,8 +326,6 @@
 
         if parameters is None:
             parameters = {}
-        if units_override is None:
-            units_override = {}
         parameters["nprocs"] = nprocs
         self.specified_parameters = parameters
 


https://bitbucket.org/yt_analysis/yt/commits/f312f7414956/
Changeset:   f312f7414956
Branch:      yt
User:        jzuhone
Date:        2014-09-24 23:07:53+00:00
Summary:     Adding units_override to Enzo
Affected #:  1 file

diff -r 644f7e587d8814964e2ebc621df0214fa48ab071 -r f312f74149565ea2b12ec57b8528a28f7871dffd yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -665,7 +665,8 @@
                  file_style = None,
                  parameter_override = None,
                  conversion_override = None,
-                 storage_filename = None):
+                 storage_filename = None,
+                 units_override=None):
         """
         This class is a stripped down class that simply reads and parses
         *filename* without looking at the index.  *dataset_type* gets passed
@@ -682,8 +683,7 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
         self.storage_filename = storage_filename
-
-        Dataset.__init__(self, filename, dataset_type, file_style=file_style)
+        Dataset.__init__(self, filename, dataset_type, file_style=file_style, units_override=units_override)
 
     def _setup_1d(self):
         self._index_class = EnzoHierarchy1D
@@ -921,6 +921,8 @@
             self.time_unit = self.quan(time_unit, "s")
             self.velocity_unit = self.length_unit / self.time_unit
 
+        self._override_code_units()
+
         magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                 (self.time_unit**2 * self.length_unit))
         magnetic_unit = np.float64(magnetic_unit.in_cgs())


https://bitbucket.org/yt_analysis/yt/commits/b75525541345/
Changeset:   b75525541345
Branch:      yt
User:        jzuhone
Date:        2014-09-25 14:21:19+00:00
Summary:     Adding units_override to gdf, fixing PEP8-isms
Affected #:  2 files

diff -r f312f74149565ea2b12ec57b8528a28f7871dffd -r b75525541345835e906f0dc5e28694071a05756d yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -162,10 +162,11 @@
     _field_info_class = GDFFieldInfo
 
     def __init__(self, filename, dataset_type='grid_data_format',
-                 storage_filename=None, geometry = 'cartesian'):
+                 storage_filename=None, geometry='cartesian',
+                 units_override=None):
         self.geometry = geometry
         self.fluid_types += ("gdf",)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r f312f74149565ea2b12ec57b8528a28f7871dffd -r b75525541345835e906f0dc5e28694071a05756d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -292,8 +292,8 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = 'stream'
 
-    def __init__(self, stream_handler, storage_filename = None,
-                 geometry = "cartesian"):
+    def __init__(self, stream_handler, storage_filename=None,
+                 geometry="cartesian"):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}


https://bitbucket.org/yt_analysis/yt/commits/52e01773f111/
Changeset:   52e01773f111
Branch:      yt
User:        jzuhone
Date:        2014-09-25 14:39:00+00:00
Summary:     Updating Athena documentation for units_override and adding some missing bits
Affected #:  1 file

diff -r b75525541345835e906f0dc5e28694071a05756d -r 52e01773f111ecd1a781b234c62d769d2a9dcc7a doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -113,29 +113,54 @@
 
 yt works in cgs ("Gaussian") units by default, but Athena data is not
 normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and mass to ``load``:
+cgs units, you may supply conversions for length, time, and mass to ``load`` using
+the ``units_override`` functionality:
 
 .. code-block:: python
 
    import yt
-   ds = yt.load("id0/cluster_merger.0250.vtk",
-                parameters={"length_unit":(1.0,"Mpc"),
-                            "time_unit"(1.0,"Myr"),
-                            "mass_unit":(1.0e14,"Msun")})
+
+   units_override = {"length_unit":(1.0,"Mpc"),
+                     "time_unit"(1.0,"Myr"),
+                     "mass_unit":(1.0e14,"Msun")}
+
+   ds = yt.load("id0/cluster_merger.0250.vtk", units_override=units_override)
 
 This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
 ``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Alternative values for the following simulation parameters may be specified using a ``parameters``
+dict, accepting the following keys:
+
+* ``Gamma``: ratio of specific heats, float
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
+* ``periodicity``: Is the domain periodic? Tuple of boolean values corresponding to each dimension
+
+.. code-block:: python
+
+   import yt
+
+   parameters = {"gamma":4./3., "geometry":"cylindrical", "periodicity":(False,False,False)}
+
+   ds = yt.load("relativistic_jet_0000.vtk", parameters=parameters)
+
 .. rubric:: Caveats
 
 * yt primarily works with primitive variables. If the Athena
   dataset contains conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
+* Special relativistic datasets may be loaded, but are not fully supported. In particular,
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 
+.. note::
+
+   The old behavior of supplying unit conversions using a ``parameters``
+   dict supplied to ``load`` for Athena datasets is still supported, but is being deprecated in
+   favor of ``units_override``, which provides the same functionality.
+
 .. _loading-orion-data:
 
 Boxlib Data


https://bitbucket.org/yt_analysis/yt/commits/7f5682ec5201/
Changeset:   7f5682ec5201
Branch:      yt
User:        jzuhone
Date:        2014-09-25 14:39:24+00:00
Summary:     Adding a deprecation warning in case someone tries to use the parameters dict for passing in units
Affected #:  1 file

diff -r 52e01773f111ecd1a781b234c62d769d2a9dcc7a -r 7f5682ec520145aa2dea224779fa176ad42c156a yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -285,7 +285,8 @@
 
         # Need to reset the units in the dataset based on the correct
         # domain left/right/dimensions.
-        self.dataset._set_code_unit_attributes()
+        # DEV: Is this really necessary?
+        #self.dataset._set_code_unit_attributes()
 
         if self.dataset.dimensionality <= 2 :
             self.dataset.domain_dimensions[2] = np.int(1)
@@ -362,8 +363,10 @@
             units_override = {}
         # This is for backwards-compatibility
         for k,v in self.specified_parameters.items():
-            if k.endswith("_unit") and k not in self.units_override:
-                self.units_override[k] = self.specified_parameters.pop(k)
+            if k.endswith("_unit") and k not in units_override:
+                raise DeprecationWarning("Supplying unit conversions from the parameters dict is deprecated, "+
+                                         "and will be removed in a future release. Use units_override instead.")
+                units_override[k] = self.specified_parameters.pop(k)
         Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:

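Note that raise DeprecationWarning(...) aborts the constructor, so the backwards-compatibility assignment on the following line can never run. A non-fatal variant using the standard warnings module might look like the sketch below (not the committed code; the loop mirrors the one in the diff):

    import warnings

    # Iterate over a copy of the keys so popping entries is safe.
    for k in list(specified_parameters):
        if k.endswith("_unit") and k not in units_override:
            warnings.warn("Supplying unit conversions via the parameters "
                          "dict is deprecated; use units_override instead.",
                          DeprecationWarning)
            units_override[k] = specified_parameters.pop(k)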

https://bitbucket.org/yt_analysis/yt/commits/b93472f8597f/
Changeset:   b93472f8597f
Branch:      yt
User:        jzuhone
Date:        2014-09-25 16:21:45+00:00
Summary:     Wordsmithing
Affected #:  1 file

diff -r 7f5682ec520145aa2dea224779fa176ad42c156a -r b93472f8597f0a774badde881af23fa3620ffbdf doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -134,9 +134,9 @@
 Alternative values for the following simulation parameters may be specified using a ``parameters``
 dict, accepting the following keys:
 
-* ``Gamma``: ratio of specific heats, float
+* ``Gamma``: ratio of specific heats, Type: Float
 * ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
-* ``periodicity``: Is the domain periodic? Tuple of boolean values corresponding to each dimension
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values corresponding to each dimension
 
 .. code-block:: python
 


https://bitbucket.org/yt_analysis/yt/commits/6ab8af95d218/
Changeset:   6ab8af95d218
Branch:      yt
User:        jzuhone
Date:        2014-09-25 16:33:20+00:00
Summary:     Unit overrides should only happen if the unit is explicitly identified (no automatic setting of derived units like "velocity" or "magnetic"), and no units should be set after the override.
Affected #:  3 files

diff -r b93472f8597f0a774badde881af23fa3620ffbdf -r 6ab8af95d218ed3506da8659c8152386675f5fe5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -706,7 +706,6 @@
                     val = (val, cgs)
                 mylog.info("Overriding %s_unit with %s %s.", unit, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
-        self.velocity_unit = getattr(self, "velocity_unit", self.length_unit/self.time_unit)
 
     _arr = None
     @property

diff -r b93472f8597f0a774badde881af23fa3620ffbdf -r 6ab8af95d218ed3506da8659c8152386675f5fe5 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -389,12 +389,12 @@
                 mylog.warning("No %s conversion to cgs provided.  " +
                               "Assuming 1.0 = 1.0 %s", unit, cgs)
                 setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
+        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                     (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
-        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                     (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):

diff -r b93472f8597f0a774badde881af23fa3620ffbdf -r 6ab8af95d218ed3506da8659c8152386675f5fe5 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -921,13 +921,13 @@
             self.time_unit = self.quan(time_unit, "s")
             self.velocity_unit = self.length_unit / self.time_unit
 
-        self._override_code_units()
-
         magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                 (self.time_unit**2 * self.length_unit))
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
 
+        self._override_code_units()
+
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)

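The point of the reordering: derived units must be computed before the override is applied, so that a user-supplied value always wins. A hedged sketch of the resulting flow (ds stands in for the dataset; the attribute names match the diff):

    import numpy as np

    def set_code_units(ds, units_override):
        # 1. Derive everything from the parameter file first ...
        ds.magnetic_unit = np.sqrt(4 * np.pi * ds.mass_unit /
                                   (ds.time_unit**2 * ds.length_unit))
        # 2. ... then apply overrides last, so a derived value never
        # clobbers one supplied by the user.
        for key, val in units_override.items():
            setattr(ds, key, val)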

https://bitbucket.org/yt_analysis/yt/commits/1e7e50206d32/
Changeset:   1e7e50206d32
Branch:      yt
User:        jzuhone
Date:        2014-09-25 16:38:39+00:00
Summary:     Changing the info output a bit.
Affected #:  1 file

diff -r 6ab8af95d218ed3506da8659c8152386675f5fe5 -r 1e7e50206d32c44ebb9cf378448fb9e1b7ec1f23 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -677,7 +677,7 @@
 
     def set_code_units(self):
         self._set_code_unit_attributes()
-        # here we override units, if an override has been provided.
+        # here we override units, if overrides have been provided.
         self._override_code_units()
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
@@ -704,7 +704,8 @@
             if val is not None:
                 if not isinstance(val, tuple):
                     val = (val, cgs)
-                mylog.info("Overriding %s_unit with %s %s.", unit, val[0], val[1])
+                u = getattr(self, "%s_unit" % unit)
+                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None


https://bitbucket.org/yt_analysis/yt/commits/ebbd155aa4be/
Changeset:   ebbd155aa4be
Branch:      yt
User:        jzuhone
Date:        2014-09-25 16:43:40+00:00
Summary:     For magnetic unit overrides to work correctly, the fallback derivation needs to live in set_code_units
Affected #:  1 file

diff -r 1e7e50206d32c44ebb9cf378448fb9e1b7ec1f23 -r ebbd155aa4bef1b76f92d1e40b1bd63037a9ec61 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -389,12 +389,15 @@
                 mylog.warning("No %s conversion to cgs provided.  " +
                               "Assuming 1.0 = 1.0 %s", unit, cgs)
                 setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
-        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                     (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):


https://bitbucket.org/yt_analysis/yt/commits/2233987989b4/
Changeset:   2233987989b4
Branch:      yt
User:        jzuhone
Date:        2014-09-25 16:46:00+00:00
Summary:     Explanatory comment
Affected #:  1 file

diff -r ebbd155aa4bef1b76f92d1e40b1bd63037a9ec61 -r 2233987989b4a2aee44e3c9da604120a974bb193 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -383,6 +383,8 @@
         Generates the conversion to various physical _units based on the parameter file
         """
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            # We check to see if these will be overridden. If not, we simply
+            # set them to the unity values in cgs. 
             val = self.units_override.get("%s_unit" % unit, None)
             if val is None:
                 if unit == "length": self.no_cgs_equiv_length = True


https://bitbucket.org/yt_analysis/yt/commits/383cb1f9d733/
Changeset:   383cb1f9d733
Branch:      yt
User:        jzuhone
Date:        2014-09-29 02:11:45+00:00
Summary:     Alfven speed and Mach alfven fields
Affected #:  1 file

diff -r 2233987989b4a2aee44e3c9da604120a974bb193 -r 383cb1f9d7336488520cbffe2551e7153cd36b28 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -55,7 +55,7 @@
     def _plasma_beta(field,data):
         """This assumes that your front end has provided Bx, By, Bz in
         units of Gauss. If you use MKS, make sure to write your own
-        PlasmaBeta field to deal with non-unitary \mu_0.
+        plasma_beta field to deal with non-unitary \mu_0.
         """
         return data[ftype,'pressure']/data[ftype,'magnetic_energy']
     registry.add_field((ftype, "plasma_beta"),
@@ -69,6 +69,10 @@
              units="erg / cm**3")
 
     def _magnetic_field_strength(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        magnetic_field_strength field to deal with non-unitary \mu_0.
+        """
         return np.sqrt(8.*np.pi*data[ftype,"magnetic_energy"])
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
@@ -110,3 +114,17 @@
              units="gauss",
              validators=[ValidateParameter("normal")])
 
+    def _alfven_speed(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        alfven_speed field to deal with non-unitary \mu_0.
+        """
+        return data[ftype,'magnetic_field_strength']/np.sqrt(4.*np.pi*data[ftype,'density'])
+    registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
+                       units="cm/s")
+
+    def _mach_alfven(field,data):
+        return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']
+    registry.add_field((ftype, "mach_alfven"), function=_mach_alfven,
+                       units="dimensionless")
+

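Outside of yt's field machinery, the same two definitions in plain NumPy (cgs throughout; the input numbers are made up for illustration):

    import numpy as np

    B = 1.0e-6     # magnetic field strength [gauss]
    rho = 1.0e-27  # mass density [g/cm**3]
    v = 2.0e7      # flow speed [cm/s]

    v_alfven = B / np.sqrt(4 * np.pi * rho)  # Alfven speed [cm/s]
    mach_alfven = v / v_alfven               # dimensionless
    print(v_alfven, mach_alfven)             # ~8.9e6, ~2.2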

https://bitbucket.org/yt_analysis/yt/commits/8078a8b94d64/
Changeset:   8078a8b94d64
Branch:      yt
User:        jzuhone
Date:        2014-10-02 21:01:54+00:00
Summary:     Forgot to finish this sentence
Affected #:  1 file

diff -r 383cb1f9d7336488520cbffe2551e7153cd36b28 -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -151,7 +151,9 @@
 * yt primarily works with primitive variables. If the Athena
   dataset contains conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
-* Special relativistic datasets may be loaded, but are not fully supported. In particular,
+* Special relativistic datasets may be loaded, but are not fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is currently assumed that their relationship
+  is that of an ideal :math:`\gamma`-law equation of state.
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 


https://bitbucket.org/yt_analysis/yt/commits/d6109b509d64/
Changeset:   d6109b509d64
Branch:      yt
User:        jzuhone
Date:        2014-10-04 05:24:48+00:00
Summary:     Merge
Affected #:  200 files

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -975,8 +986,10 @@
 
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
-    echo "Installing pure-python readline"
-    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
+    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+        echo "Installing pure-python readline"
+        ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
+    fi
 fi
 
 if [ $INST_ENZO -eq 1 ]
@@ -1026,7 +1039,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -291,7 +291,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], method=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -304,4 +304,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -72,6 +72,8 @@
 * Quantities
 * Callbacks
 
+A list of all available filters, quantities, and callbacks can be found in 
+:ref:`halo_analysis_ref`.  
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
@@ -129,7 +131,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
 
@@ -154,6 +163,18 @@
    # ... Later on in your script
    hc.add_quantity("my_quantity") 
 
+This quantity will then be accessible for functions called later via the 
+*quantities* dictionary that is associated with the halo object.
+
+.. code-block:: python
+
+   def my_new_function(halo):
+       print halo.quantities["my_quantity"]
+   add_callback("print_quantity", my_new_function)
+
+   # ... Anywhere after "my_quantity" has been called
+   hc.add_callback("print_quantity")
+
 Callbacks
 ^^^^^^^^^
 
@@ -171,10 +192,10 @@
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
-halo_callbacks.py and issue a pull request
+halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
 

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See the this cookbook recipe for an example: 
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -225,12 +225,12 @@
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
-    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
       (although it can be a subset of that volume specified by a data object
       with the ``data_source`` keyword).  Alternatively, one can specify 
-      a weight_field and different ``style`` values to change the nature
+      a weight_field and different ``method`` values to change the nature
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ /dev/null
@@ -1,96 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# A Brief Demo of Volume Rendering\n",
-      "\n",
-      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
-      "\n",
-      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
-      "\n",
-      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -24))\n",
-      "tf.add_layers(4, w=0.01)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
-      "cam.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cam.show(clip_ratio=4)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -25))\n",
-      "tf.add_layers(4, w=0.03)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/data_inspection.rst
--- a/doc/source/bootcamp/data_inspection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _data_inspection:
-
-Data Inspection
----------------
-
-.. notebook:: 2)_Data_Inspection.ipynb

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/data_objects_and_time_series.rst
--- a/doc/source/bootcamp/data_objects_and_time_series.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Data Objects and Time Series
-----------------------------
-
-.. notebook:: 4)_Data_Objects_and_Time_Series.ipynb

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/derived_fields_and_profiles.rst
--- a/doc/source/bootcamp/derived_fields_and_profiles.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Derived Fields and Profiles
----------------------------
-
-.. notebook:: 5)_Derived_Fields_and_Profiles.ipynb

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/index.rst
--- a/doc/source/bootcamp/index.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _bootcamp:
-
-yt Bootcamp
-===========
-
-The bootcamp is a series of worked examples of how to use much of the
-functionality of yt.  These are simple, short introductions to give you a taste
-of what the code can do and are not meant to be detailed walkthroughs.
-
-There are two ways in which you can go through the bootcamp: interactively and 
-non-interactively.  We recommend the interactive method, but if you're pressed 
-on time, you can non-interactively go through the linked pages below and view the 
-worked examples.
-
-To execute the bootcamp interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
-
-.. code-block:: bash
-
-   hg clone https://bitbucket.org/yt_analysis/yt
-
-Now start the IPython notebook from within the repository:
-
-.. code-block:: bash
-
-   cd yt/doc/source/bootcamp
-   yt notebook
-
-This command will give you information about the notebook server and how to
-access it.  You will basically just pick a password (for security reasons) and then 
-redirect your web browser to point to the notebook server.
-Once you have done so, choose "Introduction" from the list of
-notebooks, which includes an introduction and information about how to download
-the sample data.
-
-.. warning:: The pre-filled out notebooks are *far* less fun than running them
-             yourselves!  Check out the repo and give it a try.
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
-   :maxdepth: 1
-
-   introduction
-   data_inspection
-   simple_visualization
-   data_objects_and_time_series
-   derived_fields_and_profiles
-   volume_rendering
-
-.. note::
-
-   The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
-   details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/introduction.rst
--- a/doc/source/bootcamp/introduction.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _bootcamp-introduction:
-
-Introduction
-------------
-
-.. notebook:: 1)_Introduction.ipynb

diff -r 8078a8b94d64aef2a1781984054d495ff1b01fe9 -r d6109b509d644d08a959d706867011d96552c26f doc/source/bootcamp/simple_visualization.rst
--- a/doc/source/bootcamp/simple_visualization.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Simple Visualization
---------------------
-
-.. notebook:: 3)_Simple_Visualization.ipynb

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/f430f7f69f17/
Changeset:   f430f7f69f17
Branch:      yt
User:        jzuhone
Date:        2014-10-05 00:22:05+00:00
Summary:     Fixing this mistake
Affected #:  1 file

diff -r d6109b509d644d08a959d706867011d96552c26f -r f430f7f69f17e420054a83816fa99b11e8eec266 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -172,7 +172,7 @@
     _field_info_class = GDFFieldInfo
 
     def __init__(self, filename, dataset_type='grid_data_format',
-                 storage_filename=None, geometry=None):
+                 storage_filename=None, geometry=None,
                  units_override=None):
         self.geometry = geometry
         self.fluid_types += ("gdf",)


https://bitbucket.org/yt_analysis/yt/commits/e0f598341739/
Changeset:   e0f598341739
Branch:      yt
User:        jzuhone
Date:        2014-10-17 15:17:54+00:00
Summary:     Adding units_override to chombo.
Affected #:  1 file

diff -r f430f7f69f17e420054a83816fa99b11e8eec266 -r e0f598341739160a388b6b6950149bc0a5c5118c yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -243,7 +243,8 @@
     _field_info_class = ChomboFieldInfo
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
         self.dataset_type = dataset_type
@@ -270,7 +271,8 @@
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename, self.dataset_type)
+        Dataset.__init__(self,filename, self.dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.cosmological_simulation = False
 
@@ -445,10 +447,12 @@
     _field_info_class = PlutoFieldInfo
 
     def __init__(self, filename, dataset_type='pluto_chombo_native',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type, 
-                    storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
     def _parse_parameter_file(self):
         """
@@ -575,10 +579,12 @@
     _field_info_class = Orion2FieldInfo
 
     def __init__(self, filename, dataset_type='orion_chombo_native',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type,
-                    storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
     def _parse_parameter_file(self):
         """
@@ -660,10 +666,12 @@
     _field_info_class = ChomboPICFieldInfo3D
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename=None, ini_filename=None):
+                 storage_filename=None, ini_filename=None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type,
-                               storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
         if self.dimensionality == 1:
             self._field_info_class = ChomboPICFieldInfo1D
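
With this changeset (and the GDF and boxlib ones around it), units_override
now threads from load through ChomboDataset into its subclasses (Pluto,
Orion2, ChomboPIC).  A minimal sketch, using a hypothetical Pluto-Chombo
output file:

    import yt

    # units_override is forwarded to Dataset.__init__, which applies it
    # when code units are set, replacing the frontend's defaults.
    ds = yt.load("data.0010.hdf5",  # hypothetical Pluto output
                 units_override={"length_unit": (1.0, "au"),
                                 "time_unit": (1.0, "yr")})
    print ds.length_unit, ds.time_unit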


https://bitbucket.org/yt_analysis/yt/commits/35ec585d0ae9/
Changeset:   35ec585d0ae9
Branch:      yt
User:        jzuhone
Date:        2014-10-17 15:25:01+00:00
Summary:     Adding units_override to boxlib.
Affected #:  1 file

diff -r e0f598341739160a388b6b6950149bc0a5c5118c -r 35ec585d0ae96cffd045edfc7ce300f6eca8c31f yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -366,7 +366,8 @@
                  cparam_filename="inputs",
                  fparam_filename="probin",
                  dataset_type='boxlib_native',
-                 storage_filename=None):
+                 storage_filename=None,
+                 units_override=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -380,7 +381,8 @@
         self.fparam_filename = self._localize_check(fparam_filename)
         self.storage_filename = storage_filename
 
-        Dataset.__init__(self, output_dir, dataset_type)
+        Dataset.__init__(self, output_dir, dataset_type,
+                         units_override=units_override)
 
         # These are still used in a few places.
         if "HydroMethod" not in self.parameters.keys():


https://bitbucket.org/yt_analysis/yt/commits/9bb0ea07d3ec/
Changeset:   9bb0ea07d3ec
Branch:      yt
User:        jzuhone
Date:        2014-10-17 16:09:12+00:00
Summary:     Fixing up the Athena frontend's implementation of this
Affected #:  1 file

diff -r 35ec585d0ae96cffd045edfc7ce300f6eca8c31f -r 9bb0ea07d3ec1050b2cfd2020db47ce659b6647f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -362,10 +362,13 @@
         if units_override is None:
             units_override = {}
         # This is for backwards-compatibility
+        already_warned = False
         for k,v in self.specified_parameters.items():
             if k.endswith("_unit") and k not in units_override:
-                raise DeprecationWarning("Supplying unit conversions from the parameters dict is deprecated, "+
-                                         "and will be removed in a future release. Use units_override instead.")
+                if not already_warned:
+                    mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+
+                                  "and will be removed in a future release. Use units_override instead.")
+                    already_warned = True
                 units_override[k] = self.specified_parameters.pop(k)
         Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
@@ -382,15 +385,12 @@
         """
         Generates the conversion to various physical units based on the parameter file
         """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We check to see if these will be overridden. If not, we simply
-            # set them to the unity values in cgs. 
-            val = self.units_override.get("%s_unit" % unit, None)
-            if val is None:
-                if unit == "length": self.no_cgs_equiv_length = True
-                mylog.warning("No %s conversion to cgs provided.  " +
-                              "Assuming 1.0 = 1.0 %s", unit, cgs)
-                setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
+            # We set these to cgs for now, but they may be overridden later.
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
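
A usage contrast for the deprecation above, as a sketch; the file is the sample dataset used in the docs changeset below, and the unit values are illustrative:

import yt

# Deprecated: unit conversions passed via the parameters dict; this now
# logs a single warning instead of raising DeprecationWarning.
ds_old = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 parameters={"length_unit": 3.0857e24})  # 1 Mpc in cm

# Preferred: pass units_override directly.
ds_new = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 units_override={"length_unit": (1.0, "Mpc")})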


https://bitbucket.org/yt_analysis/yt/commits/c613abd5293a/
Changeset:   c613abd5293a
Branch:      yt
User:        jzuhone
Date:        2014-10-17 16:09:22+00:00
Summary:     units_override docs
Affected #:  1 file

diff -r 9bb0ea07d3ec1050b2cfd2020db47ce659b6647f -r c613abd5293a2f93963adbb024aee9c8d405b01b doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8ba193cc3867e2185133bbf3952bd5834e6c63993208635c71cf55fa6f27b491"
+  "signature": "sha256:2d88f5fb494c00df08117dfae8184efdce91866616db8e38614cf1c2f44370ad"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -305,9 +305,71 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Overriding Code Unit Definitions"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n",
+      "print ds1.length_unit\n",
+      "print ds1.mass_unit\n",
+      "print ds1.time_unit\n",
+      "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp1[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                  \"time_unit\":(1.0,\"Myr\"),\n",
+      "                  \"mass_unit\":(1.0e14,\"Msun\")}\n",
+      "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
+      "print ds2.length_unit\n",
+      "print ds2.mass_unit\n",
+      "print ds2.time_unit\n",
+      "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp2[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. "
+     ]
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/940dc0b9a934/
Changeset:   940dc0b9a934
Branch:      yt
User:        jzuhone
Date:        2014-10-17 17:45:13+00:00
Summary:     Adding units_override to ART and ARTIO
Affected #:  2 files

diff -r c613abd5293a2f93963adbb024aee9c8d405b01b -r 940dc0b9a934b567bc2b4f036829d02ca2a783f5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -166,7 +166,8 @@
                  skip_particles=False, skip_stars=False,
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
-                 file_particle_data=None, file_particle_stars=None):
+                 file_particle_data=None, file_particle_stars=None,
+                 units_override=None):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -186,7 +187,8 @@
         self.spread_age = spread_age
         self.domain_left_edge = np.zeros(3, dtype='float')
         self.domain_right_edge = np.zeros(3, dtype='float')+1.0
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _find_files(self, file_amr):

diff -r c613abd5293a2f93963adbb024aee9c8d405b01b -r 940dc0b9a934b567bc2b4f036829d02ca2a783f5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -314,7 +314,8 @@
     _field_info_class = ARTIOFieldInfo
 
     def __init__(self, filename, dataset_type='artio',
-                 storage_filename=None, max_range = 1024):
+                 storage_filename=None, max_range = 1024,
+                 units_override=None):
         if self._handle is not None:
             return
         self.max_range = max_range
@@ -324,7 +325,8 @@
         self._handle = artio_fileset(self._fileset_prefix)
         self.artio_parameters = self._handle.parameters
         # Here we want to initiate a traceback, if the reader is not built.
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):


https://bitbucket.org/yt_analysis/yt/commits/0ea586566966/
Changeset:   0ea586566966
Branch:      yt
User:        jzuhone
Date:        2014-10-17 17:51:25+00:00
Summary:     Adding units_override to RAMSES and Moab.
Affected #:  2 files

diff -r 940dc0b9a934b567bc2b4f036829d02ca2a783f5 -r 0ea58656696640913aec173c7707e788ca43bec0 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -69,9 +69,10 @@
     periodicity = (False, False, False)
 
     def __init__(self, filename, dataset_type='moab_hex8',
-                 storage_filename = None):
+                 storage_filename = None, units_override=None):
         self.fluid_types += ("moab",)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
         self._handle = h5py.File(self.parameter_filename, "r")
@@ -147,11 +148,12 @@
     periodicity = (False, False, False)
 
     def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
-                 storage_filename = None):
+                 storage_filename = None, units_override=None):
         self.fluid_types += ("pyne",)
         filename = "pyne_mesh_" + str(id(pyne_mesh))
         self.pyne_mesh = pyne_mesh
-        Dataset.__init__(self, str(filename), dataset_type)
+        Dataset.__init__(self, str(filename), dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r 940dc0b9a934b567bc2b4f036829d02ca2a783f5 -r 0ea58656696640913aec173c7707e788ca43bec0 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -462,7 +462,8 @@
     gamma = 1.4 # This will get replaced on hydro_fn open
     
     def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None):
+                 fields = None, storage_filename = None,
+                 units_override=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, types.StringTypes):
             fields = field_aliases[fields]
@@ -472,7 +473,7 @@
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
     def __repr__(self):


https://bitbucket.org/yt_analysis/yt/commits/01009ff1117a/
Changeset:   01009ff1117a
Branch:      yt
User:        jzuhone
Date:        2014-10-17 17:54:29+00:00
Summary:     Adding units_override to SDF
Affected #:  1 file

diff -r 0ea58656696640913aec173c7707e788ca43bec0 -r 01009ff1117a8e5353446c3185acbf8acd6c8ab0 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -77,7 +77,8 @@
                  midx_filename = None,
                  midx_header = None,
                  midx_level = None,
-                 field_map = None):
+                 field_map = None,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
@@ -102,7 +103,8 @@
         if filename.startswith("http"):
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
-        super(SDFDataset, self).__init__(filename, dataset_type)
+        super(SDFDataset, self).__init__(filename, dataset_type,
+                                         units_override=units_override)
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):


https://bitbucket.org/yt_analysis/yt/commits/fc626db52789/
Changeset:   fc626db52789
Branch:      yt
User:        jzuhone
Date:        2014-10-17 17:57:09+00:00
Summary:     Adding units_override to halo_catalogs
Affected #:  3 files

diff -r 01009ff1117a8e5353446c3185acbf8acd6c8ab0 -r fc626db52789b53da5fc181c22aaa2c3aaef39a6 yt/frontends/halo_catalogs/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
@@ -52,10 +52,11 @@
     _suffix = ".h5"
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(HaloCatalogDataset, self).__init__(filename, dataset_type)
+        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
 
     def _parse_parameter_file(self):
         with h5py.File(self.parameter_filename, "r") as f:

diff -r 01009ff1117a8e5353446c3185acbf8acd6c8ab0 -r fc626db52789b53da5fc181c22aaa2c3aaef39a6 yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -113,10 +113,11 @@
     _suffix = ".hdf5"
 
     def __init__(self, filename, dataset_type="subfind_hdf5",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(OWLSSubfindDataset, self).__init__(filename, dataset_type)
+        super(OWLSSubfindDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 01009ff1117a8e5353446c3185acbf8acd6c8ab0 -r fc626db52789b53da5fc181c22aaa2c3aaef39a6 yt/frontends/halo_catalogs/rockstar/data_structures.py
--- a/yt/frontends/halo_catalogs/rockstar/data_structures.py
+++ b/yt/frontends/halo_catalogs/rockstar/data_structures.py
@@ -56,10 +56,12 @@
     _suffix = ".bin"
 
     def __init__(self, filename, dataset_type="rockstar_binary",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(RockstarDataset, self).__init__(filename, dataset_type)
+        super(RockstarDataset, self).__init__(filename, dataset_type,
+                                              units_override=units_override)
 
     def _parse_parameter_file(self):
         with open(self.parameter_filename, "rb") as f:


https://bitbucket.org/yt_analysis/yt/commits/f6a05a9dd69c/
Changeset:   f6a05a9dd69c
Branch:      yt
User:        jzuhone
Date:        2014-10-17 17:58:31+00:00
Summary:     Adding units_override to the skeleton.
Affected #:  1 file

diff -r fc626db52789b53da5fc181c22aaa2c3aaef39a6 -r f6a05a9dd69c0bd07e3c263b903c3d966db6f84a yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -95,9 +95,12 @@
     _index_class = SkeletonHierarchy
     _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton'):
+    def __init__(self, filename, dataset_type='skeleton',
+                 storage_filename=None,
+                 units_override=None):
         self.fluid_types += ('skeleton',)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):


https://bitbucket.org/yt_analysis/yt/commits/4a8cb91c1585/
Changeset:   4a8cb91c1585
Branch:      yt
User:        jzuhone
Date:        2014-10-17 18:00:19+00:00
Summary:     Fixing overflow
Affected #:  1 file

diff -r f6a05a9dd69c0bd07e3c263b903c3d966db6f84a -r 4a8cb91c1585a5fbaa4e3fdaa3503198d66119eb yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -683,7 +683,8 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
         self.storage_filename = storage_filename
-        Dataset.__init__(self, filename, dataset_type, file_style=file_style, units_override=units_override)
+        Dataset.__init__(self, filename, dataset_type, file_style=file_style,
+                         units_override=units_override)
 
     def _setup_1d(self):
         self._index_class = EnzoHierarchy1D


https://bitbucket.org/yt_analysis/yt/commits/56b1aa57a795/
Changeset:   56b1aa57a795
Branch:      yt
User:        jzuhone
Date:        2014-10-17 18:12:42+00:00
Summary:     Adding units_override checks to SPH, but we won't use it because unit_base does the same thing
Affected #:  1 file

diff -r 4a8cb91c1585a5fbaa4e3fdaa3503198d66119eb -r 56b1aa57a79504809cf02a4aacc14afc671a4f50 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,7 +96,8 @@
                  bounding_box = None,
                  header_spec = "default",
                  field_spec = "default",
-                 ptype_spec = "default"):
+                 ptype_spec = "default",
+                 units_override=None):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -120,6 +121,9 @@
             self.domain_right_edge = bbox[:,1]
         else:
             self.domain_left_edge = self.domain_right_edge = None
+        if units_override is not None:
+            mylog.warning("units_override is not supported for GadgetDataset, "+
+                          "so it will be ignored. Use unit_base instead.")
         super(GadgetDataset, self).__init__(filename, dataset_type)
 
     def _setup_binary_spec(self, spec, spec_dict):
@@ -269,9 +273,13 @@
     def __init__(self, filename, dataset_type="gadget_hdf5", 
                  unit_base = None, n_ref=64,
                  over_refine_factor=1,
-                 bounding_box = None):
+                 bounding_box = None,
+                 units_override=None):
         self.storage_filename = None
         filename = os.path.abspath(filename)
+        if units_override is not None:
+            mylog.warning("units_override is not supported for GadgetHDF5Dataset, "+
+                          "so it will be ignored. Use unit_base instead.")
         super(GadgetHDF5Dataset, self).__init__(
             filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
             over_refine_factor=over_refine_factor,
@@ -505,7 +513,8 @@
                  unit_base=None,
                  parameter_file=None,
                  cosmology_parameters=None,
-                 n_ref=64, over_refine_factor=1):
+                 n_ref=64, over_refine_factor=1,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if field_dtypes is None:
@@ -534,6 +543,9 @@
             parameter_file = os.path.abspath(parameter_file)
         self._param_file = parameter_file
         filename = os.path.abspath(filename)
+        if units_override is not None:
+            mylog.warning("units_override is not supported for TipsyDataset, "+
+                          "so it will be ignored. Use unit_base instead.")
         super(TipsyDataset, self).__init__(filename, dataset_type)
 
     def __repr__(self):
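
For the SPH frontends the supported route remains unit_base, which serves the same purpose there. A sketch, with a placeholder path and illustrative values:

import yt

# units_override is ignored (with a warning) for Gadget, GadgetHDF5 and
# Tipsy datasets; use unit_base instead.
ds = yt.load("snapshot/snap_000.hdf5",  # placeholder path
             unit_base={"length": (1.0, "kpc")})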


https://bitbucket.org/yt_analysis/yt/commits/e8d2804ad6d2/
Changeset:   e8d2804ad6d2
Branch:      yt
User:        jzuhone
Date:        2014-10-17 18:15:37+00:00
Summary:     A little more information
Affected #:  1 file

diff -r 56b1aa57a79504809cf02a4aacc14afc671a4f50 -r e8d2804ad6d2fb6c23ea1604af443610b1c3009c doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2d88f5fb494c00df08117dfae8184efdce91866616db8e38614cf1c2f44370ad"
+  "signature": "sha256:67eb4b2a3d1017bac09209ebc939e8c1fe154660fa15f76862019dfc8652ec32"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -349,7 +349,31 @@
      "input": [
       "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
       "                  \"time_unit\":(1.0,\"Myr\"),\n",
-      "                  \"mass_unit\":(1.0e14,\"Msun\")}\n",
+      "                  \"mass_unit\":(1.0e14,\"Msun\")}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`units_override` can take the following keys:\n",
+      "\n",
+      "* `length_unit`\n",
+      "* `time_unit`\n",
+      "* `mass_unit`\n",
+      "* `magnetic_unit`\n",
+      "* `temperature_unit`\n",
+      "\n",
+      "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
       "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
       "print ds2.length_unit\n",
       "print ds2.mass_unit\n",


https://bitbucket.org/yt_analysis/yt/commits/6e44311379a8/
Changeset:   6e44311379a8
Branch:      yt
User:        jzuhone
Date:        2014-10-17 18:25:39+00:00
Summary:     Allow YTQuantities to be passed in as well
Affected #:  1 file

diff -r e8d2804ad6d2fb6c23ea1604af443610b1c3009c -r 6e44311379a8d2b1ba43f3392238027294337aa5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -695,7 +695,9 @@
                           ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
-                if not isinstance(val, tuple):
+                if isinstance(val, YTQuantity):
+                    val = (val.v, str(val.units))
+                elif not isinstance(val, tuple):
                     val = (val, cgs)
                 u = getattr(self, "%s_unit" % unit)
                 mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])


https://bitbucket.org/yt_analysis/yt/commits/e5816c401e98/
Changeset:   e5816c401e98
Branch:      yt
User:        jzuhone
Date:        2014-10-17 18:29:11+00:00
Summary:     Really warn them!
Affected #:  1 file

diff -r 6e44311379a8d2b1ba43f3392238027294337aa5 -r e5816c401e98aa4aef29c1681a53319a4e6358f0 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -690,7 +690,7 @@
         else:
             mylog.warning("Overriding code units. This is an experimental and potentially "+
                           "dangerous option that may yield inconsistent results, and must be used "+
-                          "very carefully.")
+                          "very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)


https://bitbucket.org/yt_analysis/yt/commits/f1d1537f02d9/
Changeset:   f1d1537f02d9
Branch:      yt
User:        jzuhone
Date:        2014-10-17 19:37:42+00:00
Summary:     First crack at units_override tests.
Affected #:  4 files

diff -r e5816c401e98aa4aef29c1681a53319a4e6358f0 -r f1d1537f02d9944a7db8ba9dc17be346b8264d6f yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -90,3 +90,8 @@
     ds = data_dir_load(ecp)
     # Now we test our species fields
     yield check_color_conservation(ds)
+
+@requires_file(enzotiny)
+def test_units_override():
+    for test in units_override_check(enzotiny):
+        yield test

diff -r e5816c401e98aa4aef29c1681a53319a4e6358f0 -r f1d1537f02d9944a7db8ba9dc17be346b8264d6f yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -47,3 +47,8 @@
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
+
+@requires_file(sloshing)
+def test_units_override():
+    for test in units_override_check(sloshing):
+        yield test

diff -r e5816c401e98aa4aef29c1681a53319a4e6358f0 -r f1d1537f02d9944a7db8ba9dc17be346b8264d6f yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,6 +26,7 @@
     assert_allclose, assert_raises
 from yt.units.yt_array import uconcatenate
 import yt.fields.api as field_api
+from yt.convenience import load
 
 def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
@@ -321,7 +322,28 @@
             return ftrue
         else:
             return ffalse
-                                        
+
+def units_override_check(fn):
+    units_list = ["length","time","mass","velocity",
+                  "magnetic","temperature"]
+    ds1 = load(fn)
+    units_override = {}
+    attrs1 = []
+    attrs2 = []
+    for u in units_list:
+        unit_attr = getattr(ds1, "%s_unit" % u, None)
+        if unit_attr is not None:
+            attrs1.append(unit_attr)
+            units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units))
+    del ds1
+    ds2 = load(fn, units_override=units_override)
+    assert(len(ds2.units_override) > 0)
+    for u in units_list:
+        unit_attr = getattr(ds2, "%s_unit" % u, None)
+        if unit_attr is not None:
+            attrs2.append(unit_attr)
+    yield assert_equal, attrs1, attrs2
+
 # This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
 # lower.  It's just designed to give a sample AMR index to deal with.
 _amr_grid_index = [

diff -r e5816c401e98aa4aef29c1681a53319a4e6358f0 -r f1d1537f02d9944a7db8ba9dc17be346b8264d6f yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -5,7 +5,6 @@
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
-from IPython import embed
 from yt.units.yt_array import uconcatenate
 
 def setup() :
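
units_override_check follows the nose test-generator convention, yielding an (assert_func, actual, expected) triple; a sketch of driving it by hand, with a placeholder path:

from yt.testing import units_override_check

for check, attrs1, attrs2 in units_override_check("path/to/dataset"):
    check(attrs1, attrs2)  # asserts the unit attributes match across both loads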


https://bitbucket.org/yt_analysis/yt/commits/750dc030a35d/
Changeset:   750dc030a35d
Branch:      yt
User:        jzuhone
Date:        2014-10-17 19:47:38+00:00
Summary:     FITS units_override test.
Affected #:  1 file

diff -r f1d1537f02d9944a7db8ba9dc17be346b8264d6f -r 750dc030a35d21cc447eaf628b7ca42c0a892d32 yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -41,3 +41,9 @@
     for test in small_patch_amr(vf, _fields_vels, input_center="c", input_weight="ones"):
         test_velocity_field.__name__ = test.description
         yield test
+
+@requires_file(vf)
+def test_units_override():
+    for test in units_override_check(vf):
+        yield test
+


https://bitbucket.org/yt_analysis/yt/commits/cda405b45c34/
Changeset:   cda405b45c34
Branch:      yt
User:        jzuhone
Date:        2014-10-17 19:51:18+00:00
Summary:     Fixing up the Athena answer test for units_override
Affected #:  1 file

diff -r 750dc030a35d21cc447eaf628b7ca42c0a892d32 -r cda405b45c3468dff117ebbecf102e7dfa7b4d51 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -43,16 +43,16 @@
         test_blast.__name__ = test.description
         yield test
 
-parameters_stripping = {"time_unit":3.086e14,
-                        "length_unit":8.0236e22,
-                        "mass_unit":9.999e-30*8.0236e22**3}
+uo_stripping = {"time_unit":3.086e14,
+                "length_unit":8.0236e22,
+                "mass_unit":9.999e-30*8.0236e22**3}
 
 _fields_stripping = ("temperature", "density", "specific_scalar[0]")
 
 stripping = "RamPressureStripping/id0/rps.0062.vtk"
 @requires_ds(stripping, big_data=True)
 def test_stripping():
-    ds = data_dir_load(stripping, kwargs={"parameters":parameters_stripping})
+    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
     yield assert_equal, str(ds), "rps.0062"
     for test in small_patch_amr(stripping, _fields_stripping):
         test_stripping.__name__ = test.description
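
Spelled out, the mass_unit in uo_stripping is a code density times a code volume, with the numbers copied from the diff above:

length_unit = 8.0236e22                    # cm, roughly 26 kpc
density_unit = 9.999e-30                   # g/cm**3
mass_unit = density_unit * length_unit**3  # g, the value passed in uo_stripping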


https://bitbucket.org/yt_analysis/yt/commits/c36fa416bd40/
Changeset:   c36fa416bd40
Branch:      yt
User:        jzuhone
Date:        2014-10-17 20:36:08+00:00
Summary:     This is a nasty, ugly hack but it gets the job done
Affected #:  1 file

diff -r cda405b45c3468dff117ebbecf102e7dfa7b4d51 -r c36fa416bd405858c98c313a8b86d0079fabdb09 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -324,6 +324,7 @@
             return ffalse
 
 def units_override_check(fn):
+    import gc
     units_list = ["length","time","mass","velocity",
                   "magnetic","temperature"]
     ds1 = load(fn)
@@ -336,6 +337,7 @@
             attrs1.append(unit_attr)
             units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units))
     del ds1
+    gc.collect()
     ds2 = load(fn, units_override=units_override)
     assert(len(ds2.units_override) > 0)
     for u in units_list:
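
The hack works around the dataset cache in load(): without forcing a collection, the second load can return the first instance instead of constructing a new one. Schematically, with placeholder path and override:

import gc
from yt.convenience import load

fn = "path/to/dataset"
ds1 = load(fn)
# ... record ds1's unit attributes here ...
del ds1
gc.collect()  # evict the cached instance so load() builds a fresh one
ds2 = load(fn, units_override={"length_unit": (1.0, "Mpc")})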


https://bitbucket.org/yt_analysis/yt/commits/885535621366/
Changeset:   885535621366
Branch:      yt
User:        jzuhone
Date:        2014-10-17 20:59:44+00:00
Summary:     units_override tests for chombo, ART, and ARTIO.
Affected #:  3 files

diff -r c36fa416bd405858c98c313a8b86d0079fabdb09 -r 88553562136601d3dba0a25b515171d53343636a yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -16,7 +16,8 @@
 
 from yt.testing import \
     requires_file, \
-    assert_equal
+    assert_equal, \
+    units_override_check
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     big_patch_amr, \
@@ -48,3 +49,9 @@
 @requires_file(d9p)
 def test_ARTDataset():
     assert isinstance(data_dir_load(d9p), ARTDataset)
+
+@requires_file(d9p)
+def test_units_override():
+    for test in units_override_check(d9p):
+        yield test
+

diff -r c36fa416bd405858c98c313a8b86d0079fabdb09 -r 88553562136601d3dba0a25b515171d53343636a yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -50,3 +50,8 @@
 @requires_file(sizmbhloz)
 def test_ARTIODataset():
     assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)
+
+@requires_file(sizmbhloz)
+def test_units_override():
+    for test in units_override_check(sizmbhloz):
+        yield test

diff -r c36fa416bd405858c98c313a8b86d0079fabdb09 -r 88553562136601d3dba0a25b515171d53343636a yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -15,7 +15,8 @@
 
 from yt.testing import \
     requires_file, \
-    assert_equal
+    assert_equal, \
+    units_override_check
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
@@ -80,3 +81,18 @@
 @requires_file(kho)
 def test_PlutoDataset():
     assert isinstance(data_dir_load(kho), PlutoDataset)
+
+@requires_file(zp)
+def test_units_override_zp():
+    for test in units_override_check(zp):
+        yield test
+
+@requires_file(gc)
+def test_units_override_gc():
+    for test in units_override_check(gc):
+        yield test
+
+@requires_file(kho)
+def test_units_override_kho():
+    for test in units_override_check(kho):
+        yield test
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/c252b248b8e2/
Changeset:   c252b248b8e2
Branch:      yt
User:        jzuhone
Date:        2014-10-17 21:03:02+00:00
Summary:     Adding units_override to orion
Affected #:  2 files

diff -r 88553562136601d3dba0a25b515171d53343636a -r c252b248b8e22e7b8c7c6b9595430b4d06cfbd14 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -723,10 +723,12 @@
                  cparam_filename="inputs",
                  fparam_filename="probin",
                  dataset_type='orion_native',
-                 storage_filename=None):
+                 storage_filename=None,
+                 units_override=None):
 
         BoxlibDataset.__init__(self, output_dir,
-                               cparam_filename, fparam_filename, dataset_type)
+                               cparam_filename, fparam_filename,
+                               dataset_type, units_override=units_override)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r 88553562136601d3dba0a25b515171d53343636a -r c252b248b8e22e7b8c7c6b9595430b4d06cfbd14 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -47,3 +47,9 @@
 @requires_file(rt)
 def test_OrionDataset():
     assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test
+


https://bitbucket.org/yt_analysis/yt/commits/c3b832dfeb9c/
Changeset:   c3b832dfeb9c
Branch:      yt
User:        jzuhone
Date:        2014-10-18 01:19:32+00:00
Summary:     PEP8-isms
Affected #:  1 file

diff -r c252b248b8e22e7b8c7c6b9595430b4d06cfbd14 -r c3b832dfeb9cd2ea09051ece16f71a65e30951ab yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -73,7 +73,7 @@
     def _detect_output_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [("flash", s) for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._particle_handle) :
+        if ("/particle names" in self._particle_handle):
             self.field_list += [("io", "particle_" + s[0].strip()) for s
                                 in self._particle_handle["/particle names"][:]]
     
@@ -113,10 +113,10 @@
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        if self.num_grids > 1 :
+        if self.num_grids > 1:
             np.add.accumulate(self.grid_particle_count.squeeze(),
                               out=self._particle_indices[1:])
-        else :
+        else:
             self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
@@ -199,12 +199,12 @@
 
         self.particle_filename = particle_filename
 
-        if self.particle_filename is None :
+        if self.particle_filename is None:
             self._particle_handle = self._handle
-        else :
+        else:
             try:
                 self._particle_handle = HDF5FileHandler(self.particle_filename)
-            except :
+            except:
                 raise IOError(self.particle_filename)
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
@@ -309,9 +309,9 @@
                     zipover = zip(self._handle[hn][:,'name'],self._handle[hn][:,'value'])
                 for varname, val in zipover:
                     vn = varname.strip()
-                    if hn.startswith("string") :
+                    if hn.startswith("string"):
                         pval = val.strip()
-                    else :
+                    else:
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.info("{0} {1} overwrites a simulation "
@@ -325,7 +325,7 @@
             nzb = self.parameters["nzb"]
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
-                              for ax in 'xyz'] # FLASH2 only!
+                             for ax in 'xyz'] # FLASH2 only!
         
         # Determine dimensionality
         try:
@@ -341,18 +341,18 @@
 
         self.geometry = self.parameters["geometry"]
         # Determine base grid parameters
-        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+        if 'lrefine_min' in self.parameters.keys(): # PARAMESH
             nblockx = self.parameters["nblockx"]
             nblocky = self.parameters["nblocky"]
             nblockz = self.parameters["nblockz"]
-        else : # Uniform Grid
+        else: # Uniform Grid
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
 
         # In case the user wasn't careful
-        if self.dimensionality <= 2 : nblockz = 1
-        if self.dimensionality == 1 : nblocky = 1
+        if self.dimensionality <= 2: nblockz = 1
+        if self.dimensionality == 1: nblocky = 1
 
         # Determine domain boundaries
         self.domain_left_edge = np.array(


https://bitbucket.org/yt_analysis/yt/commits/4f0ec65bc783/
Changeset:   4f0ec65bc783
Branch:      yt
User:        jzuhone
Date:        2014-10-18 01:26:28+00:00
Summary:     MOAB should use the HDF5FileHandler
Affected #:  2 files

diff -r c3b832dfeb9cd2ea09051ece16f71a65e30951ab -r 4f0ec65bc7839d289ec211219b1ca0d102c830a2 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -28,6 +28,7 @@
     io_registry
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.file_handler import HDF5FileHandler
 
 from .fields import MoabFieldInfo, PyneFieldInfo
 
@@ -75,7 +76,7 @@
                          units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
-        self._handle = h5py.File(self.parameter_filename, "r")
+        self._handle = HDF5FileHandler(filename)
 
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will

diff -r c3b832dfeb9cd2ea09051ece16f71a65e30951ab -r 4f0ec65bc7839d289ec211219b1ca0d102c830a2 yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -60,3 +60,8 @@
 @requires_file(c5)
 def test_MoabHex8Dataset():
     assert isinstance(data_dir_load(c5), MoabHex8Dataset)
+
+@requires_file(c5)
+def test_units_override():
+    for test in units_override_check(c5):
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/22a8ea31d3a6/
Changeset:   22a8ea31d3a6
Branch:      yt
User:        jzuhone
Date:        2014-10-18 01:42:10+00:00
Summary:     This reference appears to be no longer necessary. Also adding the RAMSES units_override tests.
Affected #:  2 files

diff -r 4f0ec65bc7839d289ec211219b1ca0d102c830a2 -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -331,7 +331,6 @@
 class RAMSESIndex(OctreeIndex):
 
     def __init__(self, ds, dataset_type='ramses'):
-        self._ds = ds # TODO: Figure out the class composition better!
         self.fluid_field_list = ds._fields_in_file
         self.dataset_type = dataset_type
         self.dataset = weakref.proxy(ds)
@@ -370,12 +369,12 @@
         
 
         # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self._ds.parameter_filename).split("."
+        num = os.path.basename(self.dataset.parameter_filename).split("."
                 )[0].split("_")[1]
         testdomain = 1 # Just pick the first domain file to read
         basename = "%s/%%s_%s.out%05i" % (
             os.path.abspath(
-              os.path.dirname(self._ds.parameter_filename)),
+              os.path.dirname(self.dataset.parameter_filename)),
             num, testdomain)
         hydro_fn = basename % "hydro"
         # Do we have a hydro file?
@@ -553,6 +552,7 @@
         self.omega_matter = rheader["omega_m"]
         self.hubble_constant = rheader["H0"] / 100.0 # This is H100
         self.max_level = rheader['levelmax'] - self.min_level
+        f.close()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 4f0ec65bc7839d289ec211219b1ca0d102c830a2 -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -49,3 +49,8 @@
 @requires_file(output_00080)
 def test_RAMSESDataset():
     assert isinstance(data_dir_load(output_00080), RAMSESDataset)
+
+@requires_file(output_00080)
+def test_units_override():
+    for test in units_override_check(output_00080):
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/500ca4d5c56f/
Changeset:   500ca4d5c56f
Branch:      yt
User:        jzuhone
Date:        2014-10-18 01:46:33+00:00
Summary:     Merge
Affected #:  21 files

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -987,6 +987,7 @@
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
     fi

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -302,7 +302,7 @@
 
    .. code-block:: bash
 
-      $ hg pull
+      $ hg pull upstream
       $ hg update -r "remote(tip,'upstream')"
 
 After the above steps, your local repository should be at the tip of
@@ -312,13 +312,13 @@
   [alias]
   myupdate = update -r "remote(tip,'upstream')"
 
-And then you can just issue ``hg myupdate`` to get at the tip of the yt
-branch of the main yt repository.
+And then you can just issue ``hg myupdate`` to get at the tip of the main yt repository.
 
-You can then make changes and ``hg commit`` them.  If you prefer
-working with `bookmarks <http://mercurial.selenic.com/wiki/Bookmarks>`_, you may
-want to make a bookmark before committing your changes, such as
-``hg bookmark mybookmark``.
+Make sure you are on the branch you want to be on, and then you can
+make changes and ``hg commit`` them.  If you prefer working with
+`bookmarks <http://mercurial.selenic.com/wiki/Bookmarks>`_, you may
+want to make a bookmark before committing your changes, such as ``hg
+bookmark mybookmark``.
 
 To push to your fork on BitBucket if you didn't use a bookmark, you issue the following:
 

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,12 @@
 import subprocess
 import shutil
 import glob
+
+if sys.version_info < (2, 7):
+    print("yt currently requires Python version 2.7")
+    print("certain features may fail unexpectedly and silently with older versions.")
+    sys.exit(1)
+
 import setuptools
 from distutils.version import StrictVersion
 if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -729,6 +729,7 @@
         events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int)
 
         info = {"ChannelType" : tblhdu.header["CHANTYPE"],
+                "Mission" : tblhdu.header["MISSION"],
                 "Telescope" : tblhdu.header["TELESCOP"],
                 "Instrument" : tblhdu.header["INSTRUME"]}
         
@@ -789,6 +790,8 @@
             parameters["ARF"] = f["/arf"].value
         if "channel_type" in f:
             parameters["ChannelType"] = f["/channel_type"].value
+        if "mission" in f:
+            parameters["Mission"] = f["/mission"].value
         if "telescope" in f:
             parameters["Telescope"] = f["/telescope"].value
         if "instrument" in f:
@@ -831,6 +834,8 @@
             parameters["ARF"] = tblhdu["ARF"]
         if "CHANTYPE" in tblhdu.header:
             parameters["ChannelType"] = tblhdu["CHANTYPE"]
+        if "MISSION" in tblhdu.header:
+            parameters["Mission"] = tblhdu["MISSION"]
         if "TELESCOP" in tblhdu.header:
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
@@ -920,11 +925,13 @@
         tbhdu.header["RADECSYS"] = "FK5"
         tbhdu.header["EQUINOX"] = 2000.0
         if "RMF" in self.parameters:
-            tbhdu.header["RMF"] = self.parameters["RMF"]
+            tbhdu.header["RESPFILE"] = self.parameters["RMF"]
         if "ARF" in self.parameters:
-            tbhdu.header["ARF"] = self.parameters["ARF"]
+            tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
         if "ChannelType" in self.parameters:
             tbhdu.header["CHANTYPE"] = self.parameters["ChannelType"]
+        if "Mission" in self.parameters:
+            tbhdu.header["MISSION"] = self.parameters["Mission"]
         if "Telescope" in self.parameters:
             tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
         if "Instrument" in self.parameters:
@@ -1041,6 +1048,8 @@
             f.create_dataset("/rmf", data=self.parameters["RMF"])
         if "ChannelType" in self.parameters:
             f.create_dataset("/channel_type", data=self.parameters["ChannelType"])
+        if "Mission" in self.parameters:
+            f.create_dataset("/mission", data=self.parameters["Mission"]) 
         if "Telescope" in self.parameters:
             f.create_dataset("/telescope", data=self.parameters["Telescope"])
         if "Instrument" in self.parameters:
@@ -1209,6 +1218,10 @@
                 tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
             else:        
                 tbhdu.header["ANCRFILE"] = "none"
+            if self.parameters.has_key("Mission"):
+                tbhdu.header["MISSION"] = self.parameters["Mission"]
+            else:
+                tbhdu.header["MISSION"] = "none"
             if self.parameters.has_key("Telescope"):
                 tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
             else:

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -333,61 +333,6 @@
     def normal(self):
         return self._norm_vec
 
-    def to_frb(self, width, resolution, height=None,
-               periodic=False):
-        r"""This function returns an ObliqueFixedResolutionBuffer generated
-        from this object.
-
-        An ObliqueFixedResolutionBuffer is an object that accepts a
-        variable-resolution 2D object and transforms it into an NxM bitmap that
-        can be plotted, examined or processed.  This is a convenience function
-        to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other YTSelectionContainer2D objects, 
-        this does not accept a 'center' parameter as it is assumed to be 
-        centered at the center of the cutting plane.
-
-        Parameters
-        ----------
-        width : width specifier
-            This can either be a floating point value, in the native domain
-            units of the simulation, or a tuple of the (value, unit) style.
-            This will be the width of the FRB.
-        height : height specifier, optional
-            This will be the height of the FRB, by default it is equal to width.
-        resolution : int or tuple of ints
-            The number of pixels on a side of the final FRB.
-
-        Returns
-        -------
-        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
-            A fixed resolution buffer, which can be queried for fields.
-
-        Examples
-        --------
-
-        >>> v, c = ds.find_max("density")
-        >>> sp = ds.sphere(c, (100.0, 'au'))
-        >>> L = sp.quantities.angular_momentum_vector()
-        >>> cutting = ds.cutting(L, c)
-        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
-        """
-        if iterable(width):
-            w, u = width
-            width = self.ds.quan(w, input_units = u)
-        if height is None:
-            height = width
-        elif iterable(height):
-            h, u = height
-            height = self.ds.quan(w, input_units = u)
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
-                    periodic = periodic)
-        return frb
-
     def _generate_container_field(self, field):
         if self._current_chunk is None:
             self.index._identify_base_chunk(self)
@@ -455,7 +400,7 @@
         pw._setup_plots()
         return pw
 
-    def to_frb(self, width, resolution, height=None):
+    def to_frb(self, width, resolution, height=None, periodic=False):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
 
@@ -477,6 +422,9 @@
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
+        periodic : boolean
+            This can be true or false, and governs whether the pixelization
+            will span the domain boundaries.
 
         Returns
         -------
@@ -505,7 +453,8 @@
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
         bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
+                                           periodic=periodic)
         return frb
 
 class YTDiskBase(YTSelectionContainer3D):
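
For reference, the consolidated to_frb is called like this; a sketch in which the path, field, and sizes are illustrative:

import yt

ds = yt.load("path/to/dataset")                       # placeholder
cutting = ds.cutting([0.0, 0.0, 1.0], ds.domain_center)  # plane normal, center
frb = cutting.to_frb((1.0, "Mpc"), 512, periodic=True)
image = frb["density"]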

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -331,10 +331,10 @@
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))
             self.add_particle_union(pu)
+        mylog.info("Loading field plugins.")
+        self.field_info.load_all_plugins()
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
-        mylog.info("Loading field plugins.")
-        self.field_info.load_all_plugins()
 
     def setup_deprecated_fields(self):
         from yt.fields.field_aliases import _field_name_aliases

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -178,3 +178,4 @@
             # Skip it
             continue
         func(registry, ftype, species, particle_type)
+    add_nuclei_density_fields(registry, ftype, particle_type=particle_type)

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -113,7 +113,8 @@
         self.directory = ds.fullpath
         self._handle = ds._handle
 
-        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        tr = self._handle['Chombo_global'].attrs.get("testReal", "float32")
+            
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self, ds, dataset_type)
 
@@ -156,12 +157,18 @@
         for key, val in self._handle.attrs.items():
             if key.startswith("particle"):
                 particle_fields.append(val)
-        self.field_list.extend([("io", c) for c in particle_fields])        
+        self.field_list.extend([("io", c) for c in particle_fields])
 
     def _count_grids(self):
         self.num_grids = 0
         for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
+            d = self._handle[lev]
+            if 'Processors' in d:
+                self.num_grids += d['Processors'].len()
+            elif 'boxes' in d:
+                self.num_grids += d['boxes'].len()
+            else:
+                raise RuntimeError("Uknown file specification")
 
     def _parse_index(self):
         f = self._handle # shortcut
@@ -256,18 +263,6 @@
         if D == 2:
             self.dataset_type = 'chombo2d_hdf5'
 
-        # some datasets will not be time-dependent, and to make
-        # matters worse, the simulation time is not always
-        # stored in the same place in the hdf file! Make
-        # sure we handle that here.
-        try:
-            self.current_time = self._handle.attrs['time']
-        except KeyError:
-            try:
-                self.current_time = self._handle['/level_0'].attrs['time']
-            except KeyError:
-                self.current_time = 0.0
-
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
@@ -317,6 +312,20 @@
 
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self._determine_periodic()
+        self._determine_current_time()
+
+    def _determine_current_time(self):
+        # some datasets will not be time-dependent, and to make
+        # matters worse, the simulation time is not always
+        # stored in the same place in the hdf file! Make
+        # sure we handle that here.
+        try:
+            self.current_time = self._handle.attrs['time']
+        except KeyError:
+            try:
+                self.current_time = self._handle['/level_0'].attrs['time']
+            except KeyError:
+                self.current_time = 0.0
 
     def _determine_periodic(self):
         # we default to true unless the HDF5 file says otherwise
@@ -502,6 +511,7 @@
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
 
+        self._determine_current_time()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -535,7 +545,8 @@
 
         # look for particle fields
         self.particle_filename = self.index_filename[:-4] + 'sink'
-        if not os.path.exists(self.particle_filename): return
+        if not os.path.exists(self.particle_filename):
+            return
         pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
         self.field_list.extend(pfield_list)
 
@@ -607,6 +618,7 @@
         self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self._determine_periodic()
+        self._determine_current_time()
 
     def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,6 +25,7 @@
     _dataset_type = "chombo_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
+    _offsets = None
 
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
@@ -32,6 +33,29 @@
         self._handle = ds._handle
         self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
         self._read_ghost_info()
+        if self._offset_string not in self._handle['level_0']:
+            self._calculate_offsets()
+
+    def _calculate_offsets(self):
+        def box_size(corners):
+            size = 1
+            for idim in range(self.dim):
+                size *= (corners[idim+self.dim] - corners[idim] + 1)
+            return size
+
+        self._offsets = {}
+        num_comp = self._handle.attrs['num_components']
+        level = 0
+        while 1:
+            lname = 'level_%i' % level
+            if lname not in self._handle: break
+            boxes = self._handle['level_0']['boxes'].value
+            box_sizes = np.array([box_size(box) for box in boxes])
+
+            offsets = np.cumsum(box_sizes*num_comp, dtype='int64') 
+            offsets -= offsets[0]
+            self._offsets[level] = offsets
+            level += 1
 
     def _read_ghost_info(self):
         try:
@@ -41,7 +65,7 @@
             self.ghost = np.array(self.ghost)
         except KeyError:
             # assume zero ghosts if outputGhosts not present
-            self.ghost = np.zeros(self.dim)
+            self.ghost = np.zeros(self.dim, 'int64')
 
     _field_dict = None
     @property
@@ -80,7 +104,10 @@
         shape = grid.ActiveDimensions + 2*self.ghost
         boxsize = shape.prod()
 
-        grid_offset = lev[self._offset_string][grid._level_id]
+        if self._offsets is not None:
+            grid_offset = self._offsets[grid.Level][grid._level_id]
+        else:
+            grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
@@ -223,7 +250,14 @@
     # Figure out the format of the particle file
     with open(fn, 'r') as f:
         lines = f.readlines()
-    line = lines[1]
+
+    try:
+        line = lines[1]
+    except IndexError:
+        # a particle file exists, but there is only one line,
+        # so no sinks have been created yet.
+        index = {}
+        return index
 
     # The basic fields that all sink particles have
     index = {'particle_mass': 0,

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -20,9 +20,6 @@
     FieldInfoContainer
 from yt.units.yt_array import \
     YTArray
-from yt.fields.species_fields import \
-    add_nuclei_density_fields, \
-    add_species_field_by_density
 from yt.utilities.physical_constants import \
     mh, me, mp, \
     mass_sun_cgs
@@ -152,7 +149,6 @@
         for sp in species_names:
             self.add_species_field(sp)
             self.species_names.append(known_species_names[sp])
-        add_nuclei_density_fields(self, "gas")
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -95,3 +95,13 @@
 def test_units_override():
     for test in units_override_check(enzotiny):
         yield test
+
+@requires_ds(ecp, big_data=True)
+def test_nuclei_density_fields():
+    ds = data_dir_load(ecp)
+    ad = ds.all_data()
+    yield assert_array_equal, ad["H_nuclei_density"], \
+      (ad["H_number_density"] + ad["H_p1_number_density"])
+    yield assert_array_equal, ad["He_nuclei_density"], \
+      (ad["He_number_density"] + ad["He_p1_number_density"] +
+       ad["He_p2_number_density"])

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -157,6 +157,8 @@
             naxis4 = 1
         for i, fits_file in enumerate(self.dataset._handle._fits_files):
             for j, hdu in enumerate(fits_file):
+                if isinstance(hdu, _astropy.pyfits.BinTableHDU):
+                    continue
                 if self._ensure_same_dims(hdu):
                     units = self._determine_image_units(hdu.header, known_units)
                     try:

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -80,7 +80,7 @@
         # TODO: Add additional fields
         dsl = []
         units = {}
-        for dom in self.data_files[:1]:
+        for dom in self.data_files:
             fl, _units = self.io._identify_fields(dom)
             units.update(_units)
             dom._calculate_offsets(fl)
@@ -209,14 +209,15 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        need_groups = ['Constants', 'Header', 'Parameters', 'Units', 'FOF']
+        veto_groups = []
+        valid = True
         try:
-            fileh = h5py.File(args[0], mode='r')
-            if "Constants" in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys() and \
-               "SUBFIND" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
+            fh = h5py.File(args[0], mode='r')
+            valid = all(ng in fh["/"] for ng in need_groups) and \
+              not any(vg in fh["/"] for vg in veto_groups)
+            fh.close()
         except:
+            valid = False
             pass
-        return False
+        return valid
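
The rewritten _is_valid reduces to a containment check over the file's top-level
HDF5 groups. A standalone sketch of the same pattern, assuming h5py and using a
dataset path quoted in the tests below:

    import h5py

    def looks_like_subfind(path):
        # Valid when every required group is present and no veto group is.
        need_groups = ['Constants', 'Header', 'Parameters', 'Units', 'FOF']
        veto_groups = []
        try:
            with h5py.File(path, mode='r') as fh:
                return (all(ng in fh["/"] for ng in need_groups) and
                        not any(vg in fh["/"] for vg in veto_groups))
        except (IOError, KeyError):
            return False

    print looks_like_subfind("owls_fof_halos/groups_008/group_008.0.hdf5")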

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/halo_catalogs/owls_subfind/io.py
--- a/yt/frontends/halo_catalogs/owls_subfind/io.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -82,12 +82,13 @@
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
+                    if pcount == 0: continue
                     coords = f[ptype]["CenterOfMass"].value.astype("float64")
                     coords = np.resize(coords, (pcount, 3))
                     x = coords[:, 0]
                     y = coords[:, 1]
                     z = coords[:, 2]
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:
@@ -113,8 +114,9 @@
                         yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):
-        pcount = sum(self._count_particles(data_file).values())
+        pcount = sum(data_file.total_particles.values())
         morton = np.empty(pcount, dtype='uint64')
+        if pcount == 0: return morton
         mylog.debug("Initializing index % 5i (% 7i particles)",
                     data_file.file_id, pcount)
         ind = 0
@@ -122,12 +124,11 @@
             if not f.keys(): return None
             dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
             dx = 2.0*self.ds.quan(dx, "code_length")
-            
-            for ptype, pattr in zip(["FOF", "SUBFIND"],
-                                    ["Number_of_groups", "Number_of_subgroups"]):
-                my_pcount = f[ptype].attrs[pattr]
+
+            for ptype in data_file.ds.particle_types_raw:
+                if data_file.total_particles[ptype] == 0: continue
                 pos = f[ptype]["CenterOfMass"].value.astype("float64")
-                pos = np.resize(pos, (my_pcount, 3))
+                pos = np.resize(pos, (data_file.total_particles[ptype], 3))
                 pos = data_file.ds.arr(pos, "code_length")
                 
                 # These are 32 bit numbers, so we give a little lee-way.
@@ -151,17 +152,24 @@
 
     def _count_particles(self, data_file):
         with h5py.File(data_file.filename, "r") as f:
-            # We need this to figure out where the offset fields are stored.
-            data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
-            return {"FOF": f["FOF"].attrs["Number_of_groups"],
-                    "SUBFIND": f["FOF"].attrs["Number_of_subgroups"]}
+            pcount = {"FOF": f["FOF"].attrs["Number_of_groups"]}
+            if "SUBFIND" in f:
+                # We need this to figure out where the offset fields are stored.
+                data_file.total_offset = f["SUBFIND"].attrs["Number_of_groups"]
+                pcount["SUBFIND"] = f["FOF"].attrs["Number_of_subgroups"]
+            else:
+                data_file.total_offset = 0
+                pcount["SUBFIND"] = 0
+            return pcount
 
     def _identify_fields(self, data_file):
-        fields = [(ptype, "particle_identifier")
-                  for ptype in self.ds.particle_types_raw]
+        fields = []
         pcount = data_file.total_particles
+        if sum(pcount.values()) == 0: return fields, {}
         with h5py.File(data_file.filename, "r") as f:
             for ptype in self.ds.particle_types_raw:
+                if data_file.total_particles[ptype] == 0: continue
+                fields.append((ptype, "particle_identifier"))
                 my_fields, my_offset_fields = \
                   subfind_field_list(f[ptype], ptype, data_file.total_particles)
                 fields.extend(my_fields)

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/halo_catalogs/owls_subfind/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/halo_catalogs/owls_subfind/tests/test_outputs.py
@@ -0,0 +1,40 @@
+"""
+OWLSSubfind frontend tests using owls_fof_halos datasets
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    FieldValuesTest, \
+    requires_ds, \
+    data_dir_load
+
+_fields = ("particle_position_x", "particle_position_y",
+           "particle_position_z", "particle_mass")
+
+g8 = "owls_fof_halos/groups_008/group_008.0.hdf5"
+@requires_ds(g8)
+def test_fields_g8():
+    ds = data_dir_load(g8)
+    yield assert_equal, str(ds), "group_008.0.hdf5"
+    for field in _fields:
+        yield FieldValuesTest(g8, field)
+
+# a dataset with empty files
+g3 = "owls_fof_halos/groups_003/group_003.0.hdf5"
+@requires_ds(g3)
+def test_fields_g3():
+    ds = data_dir_load(g3)
+    yield assert_equal, str(ds), "group_003.0.hdf5"
+    for field in _fields:
+        yield FieldValuesTest(g3, field)

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -387,7 +387,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         need_groups = ['Constants', 'Header', 'Parameters', 'Units']
-        veto_groups = ['SUBFIND',
+        veto_groups = ['SUBFIND', 'FOF',
                        'PartType0/ChemistryAbundances', 
                        'PartType0/ChemicalAbundances',
                        'RuntimePars', 'HashTable']

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -364,8 +364,10 @@
     _additional_fields = ()
 
     @property
-    def all_fields(self): 
-        fields = list(self._additional_fields) + self[0].keys()
+    def all_fields(self):
+        self_fields = chain.from_iterable(s.keys() for s in self.values())
+        self_fields = list(set(self_fields))
+        fields = list(self._additional_fields) + self_fields
         fields = list(set(fields))
         return fields
 
@@ -471,9 +473,8 @@
             field_units[k] = v.units
             new_data[k] = v.copy().d
         data = new_data
-    elif all([isinstance(val, np.ndarray) for val in data.values()]):
-        field_units = {field:'' for field in data.keys()}
-    elif all([(len(val) == 2) for val in data.values()]):
+    elif all([((not isinstance(val, np.ndarray)) and (len(val) == 2))
+             for val in data.values()]):
         new_data, field_units = {}, {}
         for field in data:
             try:
@@ -489,6 +490,9 @@
                 raise RuntimeError("The data dict appears to be invalid.\n" +
                                    str(e))
         data = new_data
+    elif all([iterable(val) for val in data.values()]):
+        field_units = {field:'' for field in data.keys()}
+        data = dict((field, np.array(val)) for field, val in data.iteritems())
     else:
         raise RuntimeError("The data dict appears to be invalid. "
                            "The data dictionary must map from field "
@@ -763,12 +767,13 @@
     ...          dimensions = [32, 32, 32],
     ...          number_of_particles = 0)
     ... ]
-    ... 
+    ...
     >>> for g in grid_data:
-    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...     g["density"] = np.random.random(g["dimensions"]) * 2**g["level"]
     ...
-    >>> units = dict(Density='g/cm**3')
-    >>> ds = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    >>> units = dict(density='g/cm**3')
+    >>> ds = load_amr_grids(grid_data, [32, 32, 32], field_units=units,
+    ...                     length_unit=1.0)
     """
 
     domain_dimensions = np.array(domain_dimensions)
@@ -822,6 +827,11 @@
     if magnetic_unit is None:
         magnetic_unit = 'code_magnetic'
 
+    particle_types = {}
+
+    for grid in grid_data:
+        particle_types.update(set_particle_types(grid))
+
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
@@ -833,7 +843,7 @@
         sfh,
         field_units,
         (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
-        particle_types=set_particle_types(grid_data[0])
+        particle_types=particle_types
     )
 
     handler.name = "AMRGridData"
@@ -1018,7 +1028,7 @@
     magnetic_unit : float
         Conversion factor from simulation magnetic units to gauss
     bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm
+        Size of computational domain in units of the length_unit
     sim_time : float, optional
         The simulation time in seconds
     periodicity : tuple of booleans
@@ -1191,10 +1201,8 @@
     coordinates : array_like
         This should be of size (M,3) where M is the number of vertices
         indicated in the connectivity matrix.
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
     bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm
+        Size of computational domain in units of the length unit.
     sim_time : float, optional
         The simulation time in seconds
     mass_unit : string

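The data-dict validation above accepts dicts of YTArrays, dicts of (array, unit)
tuples, or dicts of plain iterables (which become dimensionless arrays). A sketch of
two of these forms with load_uniform_grid (keyword usage follows the docstrings
above; treat it as illustrative):

    import numpy as np
    import yt

    arr = np.random.random((16, 16, 16))

    # Plain ndarray: the field is assigned dimensionless units.
    ds1 = yt.load_uniform_grid({"density": arr}, arr.shape, length_unit="cm")

    # (array, unit) tuple: the unit string is parsed and attached.
    ds2 = yt.load_uniform_grid({"density": (arr, "g/cm**3")}, arr.shape,
                               length_unit="cm")
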
diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -727,9 +727,14 @@
                 if dobj.left_edge[i] < dobj.ds.domain_left_edge[i] or \
                    dobj.right_edge[i] > dobj.ds.domain_right_edge[i]:
                     raise RuntimeError(
-                        "Error: bad Region in non-periodic domain along dimension %s. "
-                        "Region left edge = %s, Region right edge = %s"
-                        "Dataset left edge = %s, Dataset right edge = %s" % \
+                        "Error: yt attempted to read outside the boundaries of "
+                        "a non-periodic domain along dimension %s.\n"
+                        "Region left edge = %s, Region right edge = %s\n"
+                        "Dataset left edge = %s, Dataset right edge = %s\n\n"
+                        "This commonly happens when trying to compute ghost cells "
+                        "up to the domain boundary. Two possible solutions are to "
+                        "load a smaller region that does not border the edge or "
+                        "override the periodicity for this dataset." % \
                         (i, dobj.left_edge[i], dobj.right_edge[i],
                          dobj.ds.domain_left_edge[i], dobj.ds.domain_right_edge[i])
                     )
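
The new error text suggests two workarounds. A sketch of the second one, forcing
periodicity on a loaded dataset (the attribute assignment is an assumption about how
one would override it; the path is the Athena example used elsewhere in these diffs):

    import yt

    ds = yt.load("id0/cluster_merger.0250.vtk")
    # Assumption: overwrite the periodicity flags so ghost-cell reads at
    # the domain boundary wrap around instead of raising a RuntimeError.
    ds.periodicity = (True, True, True)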

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -44,29 +44,6 @@
     except: return False
     return True
 
-def ensure_unitless(func):
-    @wraps(func)
-    def wrapped(unit):
-        if unit != Unit():
-            raise RuntimeError(
-                "This operation is only defined for unitless quantities. "
-                "Received unit (%s)" % unit
-                )
-        return func(unit)
-    return wrapped
-
-def ensure_same_dimensions(func):
-    @wraps(func)
-    def wrapped(unit1, unit2):
-        if unit1 is None and not unit2.is_dimensionless:
-            raise RuntimeError
-        elif unit2 is None and not unit1.is_dimensionless:
-            raise RuntimeError
-        elif unit1.dimensions != unit2.dimensions:
-            raise RuntimeError
-        return func(unit1, unit2)
-    return wrapped
-
 def return_arr(func):
     @wraps(func)
     def wrapped(*args, **kwargs):
@@ -84,7 +61,6 @@
 def multiply_units(unit1, unit2):
     return unit1 * unit2
 
-@ensure_same_dimensions
 def preserve_units(unit1, unit2):
     return unit1
 
@@ -106,15 +82,9 @@
 def return_without_unit(unit):
     return None
 
-@ensure_unitless
-def unitless(unit):
-    return Unit()
-
-@ensure_same_dimensions
 def arctan2_unit(unit1, unit2):
     return Unit()
 
-@ensure_same_dimensions
 def comparison_unit(unit1, unit2):
     return None
 
@@ -979,12 +949,7 @@
             u = getattr(context[1][0], 'units', None)
             if u is None:
                 u = Unit()
-            try:
-                unit = self._ufunc_registry[context[0]](u)
-            # Catch the RuntimeError raised inside of ensure_same_dimensions
-            # Raise YTUnitOperationError up here since we know the context now
-            except RuntimeError:
-                raise YTUnitOperationError(context[0], u)
+            unit = self._ufunc_registry[context[0]](u)
             ret_class = type(self)
         elif context[0] in binary_operators:
             oper1 = coerce_iterable_units(context[1][0])
@@ -1014,12 +979,7 @@
                         raise YTUnitOperationError(context[0], unit1, unit2)
                     else:
                         raise YTUfuncUnitError(context[0], unit1, unit2)
-            try:
-                unit = self._ufunc_registry[context[0]](unit1, unit2)
-            # Catch the RuntimeError raised inside of ensure_same_dimensions
-            # Raise YTUnitOperationError up here since we know the context now
-            except RuntimeError:
-                raise YTUnitOperationError(context[0], unit1, unit2)
+            unit = self._ufunc_registry[context[0]](unit1, unit2)
         else:
             raise RuntimeError("Operation is not defined.")
         if unit is None:

diff -r 22a8ea31d3a6bb1973f32a1234a56e8100dd5593 -r 500ca4d5c56fb959eb9c727f2224378f24724f2b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -755,9 +755,10 @@
             hinv = False
             for i, un in enumerate((unit_x, unit_y)):
                 if hasattr(self.ds.coordinates, "default_unit_label"):
-                    axax = getattr(self.ds.coordinates, "%s_axis" % ("xy"[i]))[axis_index]
+                    axax = getattr(self.ds.coordinates,
+                                   "%s_axis" % ("xy"[i]))[axis_index]
                     un = self.ds.coordinates.default_unit_label[axax]
-                    axes_unit_labels[i] = '\/\/('+un+')'
+                    axes_unit_labels[i] = '\/\/\left('+un+'\right)'
                     continue
                 # Use sympy to factor h out of the unit.  In this context 'un'
                 # is a string, so we call the Unit constructor.
@@ -813,7 +814,7 @@
                             1, self.xlim, self.ylim)
                     else:
                         ymin, ymax = [float(y) for y in extenty]
-                    self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
+                    self.plots[f].image.set_extent((xmin, xmax, ymin, ymax))
                     self.plots[f].axes.set_aspect("auto")
 
             x_label, y_label, colorbar_label = self._get_axes_labels(f)
@@ -843,7 +844,7 @@
                 if units is None or units == '':
                     pass
                 else:
-                    colorbar_label += r'$\/\/('+units+r')$'
+                    colorbar_label += r'$\/\/\left('+units+r'\right)$'
 
             parser = MathTextParser('Agg')
             try:


https://bitbucket.org/yt_analysis/yt/commits/798284bc01f9/
Changeset:   798284bc01f9
Branch:      yt
User:        jzuhone
Date:        2014-10-21 05:06:36+00:00
Summary:     Adding a configuration option to turn off automatic caching of datasets. I'm implementing this to avoid issues with loading a dataset twice in a test, but it could have other applications.
Affected #:  4 files

diff -r 500ca4d5c56fb959eb9c727f2224378f24724f2b -r 798284bc01f92f867ea8e1c7b05b7c9dd95fe553 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -95,3 +95,5 @@
   quiet.
 * ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr
+* ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
+  is turned off.
\ No newline at end of file

diff -r 500ca4d5c56fb959eb9c727f2224378f24724f2b -r 798284bc01f92f867ea8e1c7b05b7c9dd95fe553 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -39,6 +39,7 @@
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoreddatasets = '500',
+    skip_dataset_cache = 'False',
     loadfieldplugins = 'True',
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',

diff -r 500ca4d5c56fb959eb9c727f2224378f24724f2b -r 798284bc01f92f867ea8e1c7b05b7c9dd95fe553 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -134,7 +134,9 @@
             return obj
         apath = os.path.abspath(filename)
         #if not os.path.exists(apath): raise IOError(filename)
-        if apath not in _cached_datasets:
+        if ytcfg.getboolean("yt","skip_dataset_cache"):
+            obj = object.__new__(cls)
+        elif apath not in _cached_datasets:
             obj = object.__new__(cls)
             if obj._skip_cache is False:
                 _cached_datasets[apath] = obj

diff -r 500ca4d5c56fb959eb9c727f2224378f24724f2b -r 798284bc01f92f867ea8e1c7b05b7c9dd95fe553 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -324,7 +324,7 @@
             return ffalse
 
 def units_override_check(fn):
-    import gc
+    ytcfg["yt","skip_dataset_cache"] = "True"
     units_list = ["length","time","mass","velocity",
                   "magnetic","temperature"]
     ds1 = load(fn)
@@ -337,8 +337,8 @@
             attrs1.append(unit_attr)
             units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units))
     del ds1
-    gc.collect()
     ds2 = load(fn, units_override=units_override)
+    ytcfg["yt","skip_dataset_cache"] = "False"
     assert(len(ds2.units_override) > 0)
     for u in units_list:
         unit_attr = getattr(ds2, "%s_unit" % u, None)

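Under this option, repeated load() calls return distinct Dataset objects instead of
the cached instance. A minimal sketch of toggling it at runtime, mirroring its use in
units_override_check above (the dataset path is illustrative):

    import yt
    from yt.config import ytcfg

    # Disable automatic dataset caching for subsequent loads.
    ytcfg["yt", "skip_dataset_cache"] = "True"

    ds_a = yt.load("MHDSloshing/virgo_low_res.0054.vtk")
    ds_b = yt.load("MHDSloshing/virgo_low_res.0054.vtk")
    assert ds_a is not ds_b  # a cached load would return the same object

    # Restore the default behavior.
    ytcfg["yt", "skip_dataset_cache"] = "False"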

https://bitbucket.org/yt_analysis/yt/commits/34ab3bf4077c/
Changeset:   34ab3bf4077c
Branch:      yt
User:        jzuhone
Date:        2014-10-21 12:00:27+00:00
Summary:     Removing this from the else block
Affected #:  1 file

diff -r 798284bc01f92f867ea8e1c7b05b7c9dd95fe553 -r 34ab3bf4077c554cdde1f843b7e0f9615bb2b9c8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -689,10 +689,9 @@
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return
-        else:
-            mylog.warning("Overriding code units. This is an experimental and potentially "+
-                          "dangerous option that may yield inconsistent results, and must be used "+
-                          "very carefully, and only if you know what you want from it.")
+        mylog.warning("Overriding code units. This is an experimental and potentially "+
+                      "dangerous option that may yield inconsistent results, and must be used "+
+                      "very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)


https://bitbucket.org/yt_analysis/yt/commits/42b8e035e8d2/
Changeset:   42b8e035e8d2
Branch:      yt
User:        jzuhone
Date:        2014-10-21 19:54:55+00:00
Summary:     Throw a RuntimeError instead of warning when attempting to use units_override with (these) SPH frontends.
Affected #:  1 file

diff -r 34ab3bf4077c554cdde1f843b7e0f9615bb2b9c8 -r 42b8e035e8d24c8b1cea776e6dd33fc5fd0a886e yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -122,8 +122,8 @@
         else:
             self.domain_left_edge = self.domain_right_edge = None
         if units_override is not None:
-            mylog.warning("units_override is not supported for GadgetDataset, "+
-                          "so it will be ignored. Use unit_base instead.")
+            raise RuntimeError("units_override is not supported for GadgetDataset. "+
+                               "Use unit_base instead.")
         super(GadgetDataset, self).__init__(filename, dataset_type)
 
     def _setup_binary_spec(self, spec, spec_dict):
@@ -278,8 +278,8 @@
         self.storage_filename = None
         filename = os.path.abspath(filename)
         if units_override is not None:
-            mylog.warning("units_override is not supported for GadgetHDF5Dataset, "+
-                          "so it will be ignored. Use unit_base instead.")
+            raise RuntimeError("units_override is not supported for GadgetHDF5Dataset. "+
+                               "Use unit_base instead.")
         super(GadgetHDF5Dataset, self).__init__(
             filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
             over_refine_factor=over_refine_factor,
@@ -544,8 +544,8 @@
         self._param_file = parameter_file
         filename = os.path.abspath(filename)
         if units_override is not None:
-            mylog.warning("units_override is not supported for TipsyDataset, "+
-                          "so it will be ignored. Use unit_base instead.")
+            raise RuntimeError("units_override is not supported for TipsyDataset. "+
+                               "Use unit_base instead.")
         super(TipsyDataset, self).__init__(filename, dataset_type)
 
     def __repr__(self):


https://bitbucket.org/yt_analysis/yt/commits/840185d54851/
Changeset:   840185d54851
Branch:      yt
User:        ngoldbaum
Date:        2014-10-24 18:26:35+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1236)

[APICHANGE] Overriding code units
Affected #:  36 files

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8ba193cc3867e2185133bbf3952bd5834e6c63993208635c71cf55fa6f27b491"
+  "signature": "sha256:67eb4b2a3d1017bac09209ebc939e8c1fe154660fa15f76862019dfc8652ec32"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -305,9 +305,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Overriding Code Unit Definitions"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n",
+      "print ds1.length_unit\n",
+      "print ds1.mass_unit\n",
+      "print ds1.time_unit\n",
+      "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp1[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                  \"time_unit\":(1.0,\"Myr\"),\n",
+      "                  \"mass_unit\":(1.0e14,\"Msun\")}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`units_override` can take the following keys:\n",
+      "\n",
+      "* `length_unit`\n",
+      "* `time_unit`\n",
+      "* `mass_unit`\n",
+      "* `magnetic_unit`\n",
+      "* `temperature_unit`\n",
+      "\n",
+      "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
+      "print ds2.length_unit\n",
+      "print ds2.mass_unit\n",
+      "print ds2.time_unit\n",
+      "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp2[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. "
+     ]
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -113,29 +113,56 @@
 
 yt works in cgs ("Gaussian") units by default, but Athena data is not
 normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and mass to ``load``:
+cgs units, you may supply conversions for length, time, and mass to ``load`` using
+the ``units_override`` functionality:
 
 .. code-block:: python
 
    import yt
-   ds = yt.load("id0/cluster_merger.0250.vtk",
-                parameters={"length_unit":(1.0,"Mpc"),
-                            "time_unit"(1.0,"Myr"),
-                            "mass_unit":(1.0e14,"Msun")})
+
+   units_override = {"length_unit":(1.0,"Mpc"),
+                     "time_unit"(1.0,"Myr"),
+                     "mass_unit":(1.0e14,"Msun")}
+
+   ds = yt.load("id0/cluster_merger.0250.vtk", units_override=units_override)
 
 This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
 ``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Alternative values for certain simulation parameters may be specified using a ``parameters``
+dict, which accepts the following keys:
+
+* ``gamma``: ratio of specific heats, Type: Float
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values corresponding to each dimension
+
+.. code-block:: python
+
+   import yt
+
+   parameters = {"gamma":4./3., "geometry":"cylindrical", "periodicity":(False,False,False)}
+
+   ds = yt.load("relativistic_jet_0000.vtk", parameters=parameters)
+
 .. rubric:: Caveats
 
 * yt primarily works with primitive variables. If the Athena
   dataset contains conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
+* Special relativistic datasets may be loaded, but are not fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is currently assumed that their relationship
+  is that of an ideal :math:`\gamma`-law equation of state.
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 
+.. note::
+
+   The old behavior of supplying unit conversions to ``load`` for Athena datasets
+   via a ``parameters`` dict is still supported, but is deprecated in
+   favor of ``units_override``, which provides the same functionality.
+
 .. _loading-orion-data:
 
 BoxLib Data

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -95,3 +95,5 @@
   quiet.
 * ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr
+* ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
+  is turned off.
\ No newline at end of file

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -39,6 +39,7 @@
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoreddatasets = '500',
+    skip_dataset_cache = 'False',
     loadfieldplugins = 'True',
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -134,7 +134,9 @@
             return obj
         apath = os.path.abspath(filename)
         #if not os.path.exists(apath): raise IOError(filename)
-        if apath not in _cached_datasets:
+        if ytcfg.getboolean("yt","skip_dataset_cache"):
+            obj = object.__new__(cls)
+        elif apath not in _cached_datasets:
             obj = object.__new__(cls)
             if obj._skip_cache is False:
                 _cached_datasets[apath] = obj
@@ -142,7 +144,7 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -157,6 +159,9 @@
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}
+        if units_override is None:
+            units_override = {}
+        self.units_override = units_override
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -667,6 +672,8 @@
 
     def set_code_units(self):
         self._set_code_unit_attributes()
+        # here we override units, if overrides have been provided.
+        self._override_code_units()
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -679,6 +686,24 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
                                    DW.units.dimensions)
 
+    def _override_code_units(self):
+        if len(self.units_override) == 0:
+            return
+        mylog.warning("Overriding code units. This is an experimental and potentially "+
+                      "dangerous option that may yield inconsistent results, and must be used "+
+                      "very carefully, and only if you know what you want from it.")
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
+                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
+            val = self.units_override.get("%s_unit" % unit, None)
+            if val is not None:
+                if isinstance(val, YTQuantity):
+                    val = (val.v, str(val.units))
+                elif not isinstance(val, tuple):
+                    val = (val, cgs)
+                u = getattr(self, "%s_unit" % unit)
+                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
+                setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
+
     _arr = None
     @property
     def arr(self):

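As the loop above shows, each entry in units_override may be a bare float (assumed to
carry the listed cgs default unit), a (value, unit) tuple, or a YTQuantity. A sketch
mixing the three forms (dataset path as in the notebook above):

    import yt
    from yt.units.yt_array import YTQuantity

    units_override = {"length_unit": (1.0, "Mpc"),             # tuple
                      "time_unit": 3.1557e13,                  # float, taken as seconds
                      "mass_unit": YTQuantity(1.0e14, "Msun")} # YTQuantity

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 units_override=units_override)
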
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -55,7 +55,7 @@
     def _plasma_beta(field,data):
         """This assumes that your front end has provided Bx, By, Bz in
         units of Gauss. If you use MKS, make sure to write your own
-        PlasmaBeta field to deal with non-unitary \mu_0.
+        plasma_beta field to deal with non-unitary \mu_0.
         """
         return data[ftype,'pressure']/data[ftype,'magnetic_energy']
     registry.add_field((ftype, "plasma_beta"),
@@ -69,6 +69,10 @@
              units="erg / cm**3")
 
     def _magnetic_field_strength(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        magnetic_field_strength field to deal with non-unitary \mu_0.
+        """
         return np.sqrt(8.*np.pi*data[ftype,"magnetic_energy"])
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
@@ -110,3 +114,17 @@
              units="gauss",
              validators=[ValidateParameter("normal")])
 
+    def _alfven_speed(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        alfven_speed field to deal with non-unitary \mu_0.
+        """
+        return data[ftype,'magnetic_field_strength']/np.sqrt(4.*np.pi*data[ftype,'density'])
+    registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
+                       units="cm/s")
+
+    def _mach_alfven(field,data):
+        return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']
+    registry.add_field((ftype, "mach_alfven"), function=_mach_alfven,
+                       units="dimensionless")
+
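
A sketch of querying the two new fields on an MHD dataset whose frontend supplies the
magnetic field in Gauss (the dataset path is the Athena example used elsewhere in this
changeset):

    import yt

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk")
    ad = ds.all_data()
    print ad["gas", "alfven_speed"].in_units("km/s")
    print ad["gas", "mach_alfven"]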

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -85,9 +85,12 @@
     _index_class = SkeletonHierarchy
     _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton'):
+    def __init__(self, filename, dataset_type='skeleton',
+                 storage_filename=None,
+                 units_override=None):
         self.fluid_types += ('skeleton',)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -166,7 +166,8 @@
                  skip_particles=False, skip_stars=False,
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
-                 file_particle_data=None, file_particle_stars=None):
+                 file_particle_data=None, file_particle_stars=None,
+                 units_override=None):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -186,7 +187,8 @@
         self.spread_age = spread_age
         self.domain_left_edge = np.zeros(3, dtype='float')
         self.domain_right_edge = np.zeros(3, dtype='float')+1.0
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _find_files(self, file_amr):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -16,7 +16,8 @@
 
 from yt.testing import \
     requires_file, \
-    assert_equal
+    assert_equal, \
+    units_override_check
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     big_patch_amr, \
@@ -48,3 +49,9 @@
 @requires_file(d9p)
 def test_ARTDataset():
     assert isinstance(data_dir_load(d9p), ARTDataset)
+
+@requires_file(d9p)
+def test_units_override():
+    for test in units_override_check(d9p):
+        yield test
+

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -314,7 +314,8 @@
     _field_info_class = ARTIOFieldInfo
 
     def __init__(self, filename, dataset_type='artio',
-                 storage_filename=None, max_range = 1024):
+                 storage_filename=None, max_range = 1024,
+                 units_override=None):
         if self._handle is not None:
             return
         self.max_range = max_range
@@ -324,7 +325,8 @@
         self._handle = artio_fileset(self._fileset_prefix)
         self.artio_parameters = self._handle.parameters
         # Here we want to initiate a traceback, if the reader is not built.
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -50,3 +50,8 @@
 @requires_file(sizmbhloz)
 def test_ARTIODataset():
     assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)
+
+@requires_file(sizmbhloz)
+def test_units_override():
+    for test in units_override_check(sizmbhloz):
+        yield test

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -285,7 +285,8 @@
 
         # Need to reset the units in the dataset based on the correct
         # domain left/right/dimensions.
-        self.dataset._set_code_unit_attributes()
+        # DEV: Is this really necessary?
+        #self.dataset._set_code_unit_attributes()
 
         if self.dataset.dimensionality <= 2 :
             self.dataset.domain_dimensions[2] = np.int(1)
@@ -352,12 +353,24 @@
     _dataset_type = "athena"
 
     def __init__(self, filename, dataset_type='athena',
-                 storage_filename=None, parameters=None):
+                 storage_filename=None, parameters=None,
+                 units_override=None):
         self.fluid_types += ("athena",)
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters
-        Dataset.__init__(self, filename, dataset_type)
+        if units_override is None:
+            units_override = {}
+        # This is for backwards-compatibility
+        already_warned = False
+        for k,v in self.specified_parameters.items():
+            if k.endswith("_unit") and k not in units_override:
+                if not already_warned:
+                    mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+
+                                  "and will be removed in a future release. Use units_override instead.")
+                    already_warned = True
+                units_override[k] = self.specified_parameters.pop(k)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]
@@ -372,23 +385,21 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            val = self.specified_parameters.get("%s_unit" % unit, None)
-            if val is None:
-                if unit == "length": self.no_cgs_equiv_length = True
-                mylog.warning("No %s conversion to cgs provided.  " +
-                              "Assuming 1.0 = 1.0 %s", unit, cgs)
-                val = 1.0
-            if not isinstance(val, tuple):
-                val = (val, cgs)
-            setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
-        self.velocity_unit = self.length_unit/self.time_unit
-        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                  (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
+            # We set these to cgs for now, but they may be overridden later.
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):

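The fallback definition above is the Gaussian-units relation B = sqrt(4 pi M / (T^2 L)),
which carries the dimensions of gauss in cgs. A quick dimensional check with yt
quantities (a sketch; the unit values are arbitrary):

    import numpy as np
    from yt.units.yt_array import YTQuantity

    mass_unit = YTQuantity(1.0e14, "Msun").in_cgs()
    length_unit = YTQuantity(1.0, "Mpc").in_cgs()
    time_unit = YTQuantity(1.0, "Myr").in_cgs()

    # Same expression as in set_code_units above.
    magnetic_unit = np.sqrt(4 * np.pi * mass_unit /
                            (time_unit ** 2 * length_unit))
    magnetic_unit.convert_to_units("gauss")
    print magnetic_unit
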
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -43,16 +43,16 @@
         test_blast.__name__ = test.description
         yield test
 
-parameters_stripping = {"time_unit":3.086e14,
-                        "length_unit":8.0236e22,
-                        "mass_unit":9.999e-30*8.0236e22**3}
+uo_stripping = {"time_unit":3.086e14,
+                "length_unit":8.0236e22,
+                "mass_unit":9.999e-30*8.0236e22**3}
 
 _fields_stripping = ("temperature", "density", "specific_scalar[0]")
 
 stripping = "RamPressureStripping/id0/rps.0062.vtk"
 @requires_ds(stripping, big_data=True)
 def test_stripping():
-    ds = data_dir_load(stripping, kwargs={"parameters":parameters_stripping})
+    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
     yield assert_equal, str(ds), "rps.0062"
     for test in small_patch_amr(stripping, _fields_stripping):
         test_stripping.__name__ = test.description

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -366,7 +366,8 @@
                  cparam_filename="inputs",
                  fparam_filename="probin",
                  dataset_type='boxlib_native',
-                 storage_filename=None):
+                 storage_filename=None,
+                 units_override=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -380,7 +381,8 @@
         self.fparam_filename = self._localize_check(fparam_filename)
         self.storage_filename = storage_filename
 
-        Dataset.__init__(self, output_dir, dataset_type)
+        Dataset.__init__(self, output_dir, dataset_type,
+                         units_override=units_override)
 
         # These are still used in a few places.
         if "HydroMethod" not in self.parameters.keys():
@@ -721,10 +723,12 @@
                  cparam_filename="inputs",
                  fparam_filename="probin",
                  dataset_type='orion_native',
-                 storage_filename=None):
+                 storage_filename=None,
+                 units_override=None):
 
         BoxlibDataset.__init__(self, output_dir,
-                               cparam_filename, fparam_filename, dataset_type)
+                               cparam_filename, fparam_filename,
+                               dataset_type, units_override=units_override)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -47,3 +47,9 @@
 @requires_file(rt)
 def test_OrionDataset():
     assert isinstance(data_dir_load(rt), OrionDataset)
+
+@requires_file(rt)
+def test_units_override():
+    for test in units_override_check(rt):
+        yield test
+

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -250,7 +250,8 @@
     _field_info_class = ChomboFieldInfo
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
         self.dataset_type = dataset_type
@@ -265,7 +266,8 @@
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
-        Dataset.__init__(self,filename, self.dataset_type)
+        Dataset.__init__(self,filename, self.dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.cosmological_simulation = False
 
@@ -454,10 +456,12 @@
     _field_info_class = PlutoFieldInfo
 
     def __init__(self, filename, dataset_type='pluto_chombo_native',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type, 
-                    storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
     def _parse_parameter_file(self):
         """
@@ -586,10 +590,12 @@
     _field_info_class = Orion2FieldInfo
 
     def __init__(self, filename, dataset_type='orion_chombo_native',
-                 storage_filename = None, ini_filename = None):
+                 storage_filename = None, ini_filename = None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type,
-                    storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
     def _parse_parameter_file(self):
         """
@@ -672,10 +678,12 @@
     _field_info_class = ChomboPICFieldInfo3D
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
-                 storage_filename=None, ini_filename=None):
+                 storage_filename=None, ini_filename=None,
+                 units_override=None):
 
         ChomboDataset.__init__(self, filename, dataset_type,
-                               storage_filename, ini_filename)
+                               storage_filename, ini_filename,
+                               units_override=units_override)
 
         if self.dimensionality == 1:
             self._field_info_class = ChomboPICFieldInfo1D

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -15,7 +15,8 @@
 
 from yt.testing import \
     requires_file, \
-    assert_equal
+    assert_equal, \
+    units_override_check
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
@@ -80,3 +81,18 @@
 @requires_file(kho)
 def test_PlutoDataset():
     assert isinstance(data_dir_load(kho), PlutoDataset)
+
+@requires_file(zp)
+def test_units_override_zp():
+    for test in units_override_check(zp):
+        yield test
+
+@requires_file(gc)
+def test_units_override_gc():
+    for test in units_override_check(gc):
+        yield test
+
+@requires_file(kho)
+def test_units_override_kho():
+    for test in units_override_check(kho):
+        yield test
\ No newline at end of file

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -665,7 +665,8 @@
                  file_style = None,
                  parameter_override = None,
                  conversion_override = None,
-                 storage_filename = None):
+                 storage_filename = None,
+                 units_override=None):
         """
         This class is a stripped down class that simply reads and parses
         *filename* without looking at the index.  *dataset_type* gets passed
@@ -682,8 +683,8 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
         self.storage_filename = storage_filename
-
-        Dataset.__init__(self, filename, dataset_type, file_style=file_style)
+        Dataset.__init__(self, filename, dataset_type, file_style=file_style,
+                         units_override=units_override)
 
     def _setup_1d(self):
         self._index_class = EnzoHierarchy1D
@@ -926,6 +927,8 @@
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
 
+        self._override_code_units()
+
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -91,6 +91,11 @@
     # Now we test our species fields
     yield check_color_conservation(ds)
 
+@requires_file(enzotiny)
+def test_units_override():
+    for test in units_override_check(enzotiny):
+        yield test
+
 @requires_ds(ecp, big_data=True)
 def test_nuclei_density_fields():
     ds = data_dir_load(ecp)

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -313,17 +313,18 @@
     _handle = None
 
     def __init__(self, filename,
-                 dataset_type = 'fits',
-                 auxiliary_files = [],
-                 nprocs = None,
-                 storage_filename = None,
-                 nan_mask = None,
-                 spectral_factor = 1.0,
-                 z_axis_decomp = False,
-                 line_database = None,
-                 line_width = None,
-                 suppress_astropy_warnings = True,
-                 parameters = None):
+                 dataset_type='fits',
+                 auxiliary_files=[],
+                 nprocs=None,
+                 storage_filename=None,
+                 nan_mask=None,
+                 spectral_factor=1.0,
+                 z_axis_decomp=False,
+                 line_database=None,
+                 line_width=None,
+                 suppress_astropy_warnings=True,
+                 parameters=None,
+                 units_override=None):
 
         if parameters is None:
             parameters = {}
@@ -434,7 +435,7 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, fn, dataset_type)
+        Dataset.__init__(self, fn, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

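The FITS frontend gains the same keyword alongside its existing options.
A hedged sketch, assuming a FITS cube such as the m33_hi.fits sample is
on hand:

    import yt

    # nan_mask and units_override can be combined freely.
    ds = yt.load("m33_hi.fits", nan_mask=0.0,
                 units_override={"length_unit": (1.0, "pc")})
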
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -41,3 +41,9 @@
     for test in small_patch_amr(vf, _fields_vels, input_center="c", input_weight="ones"):
         test_velocity_field.__name__ = test.description
         yield test
+
+@requires_file(vf)
+def test_units_override():
+    for test in units_override_check(vf):
+        yield test
+

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -73,7 +73,7 @@
     def _detect_output_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [("flash", s) for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._particle_handle) :
+        if ("/particle names" in self._particle_handle):
             self.field_list += [("io", "particle_" + s[0].strip()) for s
                                 in self._particle_handle["/particle names"][:]]
     
@@ -113,10 +113,10 @@
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        if self.num_grids > 1 :
+        if self.num_grids > 1:
             np.add.accumulate(self.grid_particle_count.squeeze(),
                               out=self._particle_indices[1:])
-        else :
+        else:
             self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
@@ -191,29 +191,27 @@
     def __init__(self, filename, dataset_type='flash_hdf5',
                  storage_filename = None,
                  particle_filename = None, 
-                 conversion_override = None):
+                 units_override = None):
 
         self.fluid_types += ("flash",)
         if self._handle is not None: return
         self._handle = HDF5FileHandler(filename)
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-        
+
         self.particle_filename = particle_filename
 
-        if self.particle_filename is None :
+        if self.particle_filename is None:
             self._particle_handle = self._handle
-        else :
+        else:
             try:
                 self._particle_handle = HDF5FileHandler(self.particle_filename)
-            except :
+            except:
                 raise IOError(self.particle_filename)
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
         self.parameters["HydroMethod"] = 'flash' # always PPM DE
@@ -311,9 +309,9 @@
                     zipover = zip(self._handle[hn][:,'name'],self._handle[hn][:,'value'])
                 for varname, val in zipover:
                     vn = varname.strip()
-                    if hn.startswith("string") :
+                    if hn.startswith("string"):
                         pval = val.strip()
-                    else :
+                    else:
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.info("{0} {1} overwrites a simulation "
@@ -327,7 +325,7 @@
             nzb = self.parameters["nzb"]
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
-                              for ax in 'xyz'] # FLASH2 only!
+                             for ax in 'xyz'] # FLASH2 only!
         
         # Determine dimensionality
         try:
@@ -343,18 +341,18 @@
 
         self.geometry = self.parameters["geometry"]
         # Determine base grid parameters
-        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+        if 'lrefine_min' in self.parameters.keys(): # PARAMESH
             nblockx = self.parameters["nblockx"]
             nblocky = self.parameters["nblocky"]
             nblockz = self.parameters["nblockz"]
-        else : # Uniform Grid
+        else: # Uniform Grid
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
 
         # In case the user wasn't careful
-        if self.dimensionality <= 2 : nblockz = 1
-        if self.dimensionality == 1 : nblocky = 1
+        if self.dimensionality <= 2: nblockz = 1
+        if self.dimensionality == 1: nblocky = 1
 
         # Determine domain boundaries
         self.domain_left_edge = np.array(

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -47,3 +47,8 @@
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
+
+@requires_file(sloshing)
+def test_units_override():
+    for test in units_override_check(sloshing):
+        yield test

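The sloshing dataset used in the test above also makes a convenient
end-to-end illustration of the new keyword. A hedged sketch, assuming
the GasSloshing sample data is available:

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100",
                 units_override={"length_unit": (1.0, "Mpc"),
                                 "time_unit": (1.0, "Myr"),
                                 "mass_unit": (1.0e14, "Msun")})
    # The overrides replace whatever the FLASH file defines.
    print(ds.length_unit.in_cgs())
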
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -172,10 +172,11 @@
     _field_info_class = GDFFieldInfo
 
     def __init__(self, filename, dataset_type='grid_data_format',
-                 storage_filename=None, geometry=None):
+                 storage_filename=None, geometry=None,
+                 units_override=None):
         self.geometry = geometry
         self.fluid_types += ("gdf",)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/halo_catalogs/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
@@ -52,10 +52,11 @@
     _suffix = ".h5"
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(HaloCatalogDataset, self).__init__(filename, dataset_type)
+        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
 
     def _parse_parameter_file(self):
         with h5py.File(self.parameter_filename, "r") as f:

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/halo_catalogs/owls_subfind/data_structures.py
--- a/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/data_structures.py
@@ -113,10 +113,11 @@
     _suffix = ".hdf5"
 
     def __init__(self, filename, dataset_type="subfind_hdf5",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(OWLSSubfindDataset, self).__init__(filename, dataset_type)
+        super(OWLSSubfindDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/halo_catalogs/rockstar/data_structures.py
--- a/yt/frontends/halo_catalogs/rockstar/data_structures.py
+++ b/yt/frontends/halo_catalogs/rockstar/data_structures.py
@@ -56,10 +56,12 @@
     _suffix = ".bin"
 
     def __init__(self, filename, dataset_type="rockstar_binary",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(RockstarDataset, self).__init__(filename, dataset_type)
+        super(RockstarDataset, self).__init__(filename, dataset_type,
+                                              units_override=units_override)
 
     def _parse_parameter_file(self):
         with open(self.parameter_filename, "rb") as f:

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -28,6 +28,7 @@
     io_registry
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.file_handler import HDF5FileHandler
 
 from .fields import MoabFieldInfo, PyneFieldInfo
 
@@ -69,12 +70,13 @@
     periodicity = (False, False, False)
 
     def __init__(self, filename, dataset_type='moab_hex8',
-                 storage_filename = None):
+                 storage_filename = None, units_override=None):
         self.fluid_types += ("moab",)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
-        self._handle = h5py.File(self.parameter_filename, "r")
+        self._handle = HDF5FileHandler(filename)
 
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
@@ -147,11 +149,12 @@
     periodicity = (False, False, False)
 
     def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
-                 storage_filename = None):
+                 storage_filename = None, units_override=None):
         self.fluid_types += ("pyne",)
         filename = "pyne_mesh_" + str(id(pyne_mesh))
         self.pyne_mesh = pyne_mesh
-        Dataset.__init__(self, str(filename), dataset_type)
+        Dataset.__init__(self, str(filename), dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
         self.filename = filename
 

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -60,3 +60,8 @@
 @requires_file(c5)
 def test_MoabHex8Dataset():
     assert isinstance(data_dir_load(c5), MoabHex8Dataset)
+
+@requires_file(c5)
+def test_units_override():
+    for test in units_override_check(c5):
+        yield test

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -331,7 +331,6 @@
 class RAMSESIndex(OctreeIndex):
 
     def __init__(self, ds, dataset_type='ramses'):
-        self._ds = ds # TODO: Figure out the class composition better!
         self.fluid_field_list = ds._fields_in_file
         self.dataset_type = dataset_type
         self.dataset = weakref.proxy(ds)
@@ -370,12 +369,12 @@
         
 
         # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self._ds.parameter_filename).split("."
+        num = os.path.basename(self.dataset.parameter_filename).split("."
                 )[0].split("_")[1]
         testdomain = 1 # Just pick the first domain file to read
         basename = "%s/%%s_%s.out%05i" % (
             os.path.abspath(
-              os.path.dirname(self._ds.parameter_filename)),
+              os.path.dirname(self.dataset.parameter_filename)),
             num, testdomain)
         hydro_fn = basename % "hydro"
         # Do we have a hydro file?
@@ -462,7 +461,8 @@
     gamma = 1.4 # This will get replaced on hydro_fn open
     
     def __init__(self, filename, dataset_type='ramses',
-                 fields = None, storage_filename = None):
+                 fields = None, storage_filename = None,
+                 units_override=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, types.StringTypes):
             fields = field_aliases[fields]
@@ -472,7 +472,7 @@
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.storage_filename = storage_filename
 
     def __repr__(self):
@@ -552,6 +552,7 @@
         self.omega_matter = rheader["omega_m"]
         self.hubble_constant = rheader["H0"] / 100.0 # This is H100
         self.max_level = rheader['levelmax'] - self.min_level
+        f.close()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -49,3 +49,8 @@
 @requires_file(output_00080)
 def test_RAMSESDataset():
     assert isinstance(data_dir_load(output_00080), RAMSESDataset)
+
+ at requires_file(output_00080)
+def test_units_override():
+    for test in units_override_check(output_00080):
+        yield test

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -77,7 +77,8 @@
                  midx_filename = None,
                  midx_header = None,
                  midx_level = None,
-                 field_map = None):
+                 field_map = None,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
@@ -102,7 +103,8 @@
         if filename.startswith("http"):
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
-        super(SDFDataset, self).__init__(filename, dataset_type)
+        super(SDFDataset, self).__init__(filename, dataset_type,
+                                         units_override=units_override)
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,7 +96,8 @@
                  bounding_box = None,
                  header_spec = "default",
                  field_spec = "default",
-                 ptype_spec = "default"):
+                 ptype_spec = "default",
+                 units_override=None):
         if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
@@ -120,6 +121,9 @@
             self.domain_right_edge = bbox[:,1]
         else:
             self.domain_left_edge = self.domain_right_edge = None
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for GadgetDataset. "+
+                               "Use unit_base instead.")
         super(GadgetDataset, self).__init__(filename, dataset_type)
 
     def _setup_binary_spec(self, spec, spec_dict):
@@ -269,9 +273,13 @@
     def __init__(self, filename, dataset_type="gadget_hdf5", 
                  unit_base = None, n_ref=64,
                  over_refine_factor=1,
-                 bounding_box = None):
+                 bounding_box = None,
+                 units_override=None):
         self.storage_filename = None
         filename = os.path.abspath(filename)
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for GadgetHDF5Dataset. "+
+                               "Use unit_base instead.")
         super(GadgetHDF5Dataset, self).__init__(
             filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
             over_refine_factor=over_refine_factor,
@@ -505,7 +513,8 @@
                  unit_base=None,
                  parameter_file=None,
                  cosmology_parameters=None,
-                 n_ref=64, over_refine_factor=1):
+                 n_ref=64, over_refine_factor=1,
+                 units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if field_dtypes is None:
@@ -534,6 +543,9 @@
             parameter_file = os.path.abspath(parameter_file)
         self._param_file = parameter_file
         filename = os.path.abspath(filename)
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for TipsyDataset. "+
+                               "Use unit_base instead.")
         super(TipsyDataset, self).__init__(filename, dataset_type)
 
     def __repr__(self):

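The SPH frontends deliberately reject units_override because the
Gadget-type readers already expose their own mechanism, unit_base. A
hedged sketch of the supported route (snapshot path illustrative):

    import yt

    # unit_base, not units_override, is the knob for these frontends.
    ds = yt.load("snapshot_033",
                 unit_base={"length": (1.0, "kpc"),
                            "velocity": (1.0, "km/s"),
                            "mass": (1.0e10, "Msun")})
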
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -292,8 +292,8 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = 'stream'
 
-    def __init__(self, stream_handler, storage_filename = None,
-                 geometry = "cartesian"):
+    def __init__(self, stream_handler, storage_filename=None,
+                 geometry="cartesian"):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}

diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,6 +26,7 @@
     assert_allclose, assert_raises
 from yt.units.yt_array import uconcatenate
 import yt.fields.api as field_api
+from yt.convenience import load
 
 def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
@@ -321,7 +322,30 @@
             return ftrue
         else:
             return ffalse
-                                        
+
+def units_override_check(fn):
+    ytcfg["yt","skip_dataset_cache"] = "True"
+    units_list = ["length","time","mass","velocity",
+                  "magnetic","temperature"]
+    ds1 = load(fn)
+    units_override = {}
+    attrs1 = []
+    attrs2 = []
+    for u in units_list:
+        unit_attr = getattr(ds1, "%s_unit" % u, None)
+        if unit_attr is not None:
+            attrs1.append(unit_attr)
+            units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units))
+    del ds1
+    ds2 = load(fn, units_override=units_override)
+    ytcfg["yt","skip_dataset_cache"] = "False"
+    assert(len(ds2.units_override) > 0)
+    for u in units_list:
+        unit_attr = getattr(ds2, "%s_unit" % u, None)
+        if unit_attr is not None:
+            attrs2.append(unit_attr)
+    yield assert_equal, attrs1, attrs2
+
 # This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
 # lower.  It's just designed to give a sample AMR index to deal with.
 _amr_grid_index = [

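Since units_override_check yields (assert_equal, attrs1, attrs2)
tuples, the per-frontend tests added in this changeset run as nose
generator tests. A minimal sketch of the pattern (frontend sample file
hypothetical):

    from yt.testing import requires_file, units_override_check

    my_output = "MyFrontend/output_0001"  # hypothetical sample dataset

    @requires_file(my_output)
    def test_units_override():
        # nose executes each yielded tuple as a separate test case.
        for test in units_override_check(my_output):
            yield test
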
diff -r 96b14a15b10218f5dc7889b3c20821f1985ee048 -r 840185d54851673d51b494f973231d9926220a0d yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -5,7 +5,6 @@
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
-from IPython import embed
 from yt.units.yt_array import uconcatenate
 
 def setup() :

Repository URL: https://bitbucket.org/yt_analysis/yt/
