[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org
Tue Jan 31 10:42:01 PST 2017


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a5cb2cb5c28b/
Changeset:   a5cb2cb5c28b
Branch:      yt
User:        krafczyk
Date:        2017-01-12 23:06:01+00:00
Summary:     Correct coordinate problem when 'center' is used in plots.

Add a mod function which 'wraps' coordinates so they stay between 0 and 1.
Without the mod, expressions such as ((ccoord[0]-x0)/(x1-x0)) can fall outside
the interval [0, 1) when 'center' is used.
Affected #:  1 file
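
A minimal standalone sketch of the wrapping behavior (hypothetical values,
not the callback code itself):

    import numpy as np

    x0, x1 = 0.25, 0.75                 # plot data limits (made up)
    ccoord = np.array([0.1, 0.9])       # data coordinates after a 'center' shift
    norm = (ccoord - x0) / (x1 - x0)    # [-0.3, 1.3] -- outside [0, 1)
    print(np.mod(norm, 1.0))            # approx. [0.7, 0.3] -- wrapped into [0, 1)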

diff -r ec92132823b016f9056f53d166e7262bca2275a7 -r a5cb2cb5c28b4b2aa59f78e73df9fd7f792b017b yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -138,6 +138,8 @@
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
+        unit = np.array(np.tile(1.,ncoord))
+       
         x0 = np.array(np.tile(plot.xlim[0],ncoord))
         x1 = np.array(np.tile(plot.xlim[1],ncoord))
         xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
@@ -152,11 +154,11 @@
 
         # We need a special case for when we are only given one coordinate.
         if ccoord.shape == (2,):
-            return ((ccoord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
-                    (ccoord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0]-x0)/(x1-x0),unit)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1]-y0)/(y1-y0),unit)*(yy1-yy0) + yy0)
         else:
-            return ((ccoord[0][:]-x0)/(x1-x0)*(xx1-xx0) + xx0,
-                    (ccoord[1][:]-y0)/(y1-y0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0][:]-x0)/(x1-x0),unit)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1][:]-y0)/(y1-y0),unit)*(yy1-yy0) + yy0)
 
     def sanitize_coord_system(self, plot, coord, coord_system):
         """


https://bitbucket.org/yt_analysis/yt/commits/85b778b06f5e/
Changeset:   85b778b06f5e
Branch:      yt
User:        krafczyk
Date:        2017-01-13 00:37:55+00:00
Summary:     eliminate 'unit' since np.mod can broadcast.
Affected #:  1 file
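
The simplification relies on NumPy broadcasting: a Python scalar as the second
argument to np.mod behaves like an array of ones of matching shape. A quick
illustrative check (hypothetical values):

    import numpy as np

    a = np.array([-0.3, 0.5, 1.3])
    print(np.mod(a, np.ones(3)))   # [0.7 0.5 0.3]
    print(np.mod(a, 1.0))          # same result; the scalar broadcasts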

diff -r a5cb2cb5c28b4b2aa59f78e73df9fd7f792b017b -r 85b778b06f5ea0b386d136407b567091ec355c22 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -138,8 +138,6 @@
         # Convert the data and plot limits to tiled numpy arrays so that
         # convert_to_plot is automatically vectorized.
 
-        unit = np.array(np.tile(1.,ncoord))
-       
         x0 = np.array(np.tile(plot.xlim[0],ncoord))
         x1 = np.array(np.tile(plot.xlim[1],ncoord))
         xx0 = np.tile(plot._axes.get_xlim()[0],ncoord)
@@ -154,11 +152,11 @@
 
         # We need a special case for when we are only given one coordinate.
         if ccoord.shape == (2,):
-            return (np.mod((ccoord[0]-x0)/(x1-x0),unit)*(xx1-xx0) + xx0,
-                    np.mod((ccoord[1]-y0)/(y1-y0),unit)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0]-x0)/(x1-x0),1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1]-y0)/(y1-y0),1.0)*(yy1-yy0) + yy0)
         else:
-            return (np.mod((ccoord[0][:]-x0)/(x1-x0),unit)*(xx1-xx0) + xx0,
-                    np.mod((ccoord[1][:]-y0)/(y1-y0),unit)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0][:]-x0)/(x1-x0),1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1][:]-y0)/(y1-y0),1.0)*(yy1-yy0) + yy0)
 
     def sanitize_coord_system(self, plot, coord, coord_system):
         """


https://bitbucket.org/yt_analysis/yt/commits/01c9466b19e4/
Changeset:   01c9466b19e4
Branch:      yt
User:        krafczyk
Date:        2017-01-17 18:51:56+00:00
Summary:     Merge
Affected #:  23 files

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -98,13 +98,13 @@
 
    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
 
-.. _loading_athena_data:
+.. _loading-athena-data:
 
 Athena Data
 -----------
 
-Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+Athena 4.x VTK data is supported and cared for by John ZuHone. Both uniform grid
+and SMR datasets are supported.
 
 .. note:
    yt also recognizes Fargo3D data written to VTK files as
@@ -138,10 +138,14 @@
 which will pick up all of the files in the different ``id*`` directories for
 the entire dataset.
 
-yt works in cgs ("Gaussian") units by default, but Athena data is not
-normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and mass to ``load`` using
-the ``units_override`` functionality:
+The default unit system in yt is cgs ("Gaussian") units, but Athena data is not
+normally stored in these units, so the code unit system is the default unit
+system for Athena data. This means that answers to field queries from data
+objects and plots of data will be expressed in code units. Note that the default
+conversions from these units will still be in terms of cgs units, e.g. 1
+``code_length`` equals 1 cm, and so on. If you would like to provide different
+conversions, you may supply conversions for length, time, and mass to ``load``
+using the ``units_override`` functionality:
 
 .. code-block:: python
 
@@ -153,14 +157,16 @@
 
    ds = yt.load("id0/cluster_merger.0250.vtk", units_override=units_override)
 
-This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
-``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
-``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
-in code units.
+This means that the yt fields, e.g. ``("gas","density")``,
+``("gas","velocity_x")``, ``("gas","magnetic_field_x")``, will be in cgs units
+(or whatever unit system was specified), but the Athena fields, e.g.,
+``("athena","density")``, ``("athena","velocity_x")``,
+``("athena","cell_centered_B_x")``, will be in code units.
 
-Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
-the `join_vtk` script), and may benefit from being subdivided into "virtual grids". For this purpose,
-one can pass in the `nprocs` parameter:
+Some 3D Athena outputs may have large grids (especially parallel datasets
+subsequently joined with the ``join_vtk`` script), and may benefit from being
+subdivided into "virtual grids". For this purpose, one can pass in the
+``nprocs`` parameter:
 
 .. code-block:: python
 
@@ -168,43 +174,100 @@
 
    ds = yt.load("sloshing.0000.vtk", nprocs=8)
 
-which will subdivide each original grid into `nprocs` grids.
+which will subdivide each original grid into ``nprocs`` grids.
 
 .. note::
 
     Virtual grids are only supported (and really only necessary) for 3D data.
 
-Alternative values for the following simulation parameters may be specified using a ``parameters``
-dict, accepting the following keys:
+Alternative values for the following simulation parameters may be specified
+using a ``parameters`` dict, accepting the following keys:
 
 * ``Gamma``: ratio of specific heats, Type: Float
-* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
-* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values corresponding to each dimension
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or
+  ``"cylindrical"``
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values
+  corresponding to each dimension
 
 .. code-block:: python
 
    import yt
 
-   parameters = {"gamma":4./3., "geometry":"cylindrical", "periodicity":(False,False,False)}
+   parameters = {"gamma":4./3., "geometry":"cylindrical",
+                 "periodicity":(False,False,False)}
 
    ds = yt.load("relativistic_jet_0000.vtk", parameters=parameters)
 
 .. rubric:: Caveats
 
-* yt primarily works with primitive variables. If the Athena
-  dataset contains conservative variables, the yt primitive fields will be generated from the
+* yt primarily works with primitive variables. If the Athena dataset contains
+  conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
-* Special relativistic datasets may be loaded, but are not fully supported. In particular, the relationships between
-  quantities such as pressure and thermal energy will be incorrect, as it is currently assumed that their relationship
-  is that of an ideal a :math:`\gamma`-law equation of state.
+* Special relativistic datasets may be loaded, but at this time not all of
+  their fields are fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is
+  currently assumed that their relationship is that of an ideal
+  :math:`\gamma`-law equation of state. This will be rectified in a future
+  release.
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 
 .. note::
 
    The old behavior of supplying unit conversions using a ``parameters``
-   dict supplied to ``load`` for Athena datasets is still supported, but is being deprecated in
-   favor of ``units_override``, which provides the same functionality.
+   dict supplied to ``load`` for Athena datasets is still supported, but is
+   being deprecated in favor of ``units_override``, which provides the same
+   functionality.
+
+.. _loading-athena-pp-data:
+
+Athena++ Data
+-------------
+
+Athena++ HDF5 data is supported and cared for by John ZuHone. Uniform-grid, SMR,
+and AMR datasets in cartesian coordinates are fully supported. Support for
+curvilinear coordinates and logarithmic cell sizes exists, but is preliminary.
+For the latter type of dataset, the data will be loaded in as a semi-structured
+mesh dataset. See :ref:`loading-semi-structured-mesh-data` for more details on
+how this works in yt.
+
+The default unit system in yt is cgs ("Gaussian") units, but Athena++ data is
+not normally stored in these units, so the code unit system is the default unit
+system for Athena++ data. This means that answers to field queries from data
+objects and plots of data will be expressed in code units. Note that the default
+conversions from these units will still be in terms of cgs units, e.g. 1
+``code_length`` equals 1 cm, and so on. If you would like to provide different
+conversions, you may supply conversions for length, time, and mass to ``load``
+using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+
+   units_override = {"length_unit":(1.0,"Mpc"),
+                     "time_unit"(1.0,"Myr"),
+                     "mass_unit":(1.0e14,"Msun")}
+
+   ds = yt.load("AM06/AM06.out1.00400.athdf", units_override=units_override)
+
+This means that the yt fields, e.g. ``("gas","density")``,
+``("gas","velocity_x")``, ``("gas","magnetic_field_x")``, will be in cgs units
+(or whatever unit system was specified), but the Athena fields, e.g.,
+``("athena_pp","density")``, ``("athena_pp","vel1")``, ``("athena_pp","Bcc1")``,
+will be in code units.
+
+.. rubric:: Caveats
+
+* yt primarily works with primitive variables. If the Athena++ dataset contains
+  conservative variables, the yt primitive fields will be generated from the
+  conserved variables on disk.
+* Special relativistic datasets may be loaded, but at this time not all of their
+  fields are fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is
+  currently assumed that their relationship is that of an ideal
+  :math:`\gamma`-law equation of state. This will be rectified in a future
+  release.
+* Domains may be visualized assuming periodicity.
 
 .. _loading-orion-data:
 
@@ -1182,6 +1245,8 @@
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
+.. _loading-semi-structured-mesh-data:
+
 Semi-Structured Grid Data
 -------------------------
 

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -304,8 +304,10 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_halos(self, halo_catalog, circle_args=None, \
-                             width=None, annotate_field=None, text_args=None, \
-                             factor=1.0)
+                             width=None, annotate_field=None, \
+                             radius_field='virial_radius', \
+                             center_field_prefix="particle_position", \
+                             text_args=None, factor=1.0)
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
@@ -321,11 +323,22 @@
    with the annotate_field, which accepts a field contained in the halo catalog
    to add text to the plot near the halo (example: ``annotate_field=
    'particle_mass'`` will write the halo mass next to each halo, whereas
-   ``'particle_identifier'`` shows the halo number).  font_kwargs contains the
-   arguments controlling the text appearance of the annotated field.
-   Factor is the number the virial radius is multiplied by for plotting the
-   circles. Ex: ``factor=2.0`` will plot circles with twice the radius of each
-   halo virial radius.
+   ``'particle_identifier'`` shows the halo number). The size of the circles is
+   found from the field ``radius_field`` which is ``'virial_radius'`` by
+   default. If another radius has been found as part of your halo analysis
+   workflow, you can save that field and use it as the ``radius_field`` to
+   change the size of the halos. The position of each halo is determined using
+   ``center_field_prefix`` in the following way. If ``'particle_position'``
+   is the value of ``center_field_prefix`` (the default), the x value of
+   the halo position is stored in the field ``'particle_position_x'``, y is
+   ``'particle_position_y'``, and z is ``'particle_position_z'``. If you have
+   stored another set of coordinates for each halo as part of your halo
+   analysis as fields such as ``'halo_position_x'``, you can use these fields
+   to determine halo position by passing ``'halo_position'`` to
+   ``center_field_prefix``. font_kwargs contains the arguments controlling the
+   text appearance of the annotated field. Factor is the number the virial
+   radius is multiplied by for plotting the circles. Ex: ``factor=2.0`` will
+   plot circles with twice the radius of each halo virial radius.
 
 .. python-script::
 

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -2,9 +2,12 @@
   local_artio_001:
     - yt/frontends/artio/tests/test_outputs.py
 
-  local_athena_002:
+  local_athena_003:
     - yt/frontends/athena
 
+  local_athena_pp_000:
+    - yt/frontends/athena_pp
+
   local_chombo_002:
     - yt/frontends/chombo/tests/test_outputs.py
 

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -36,15 +36,17 @@
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
     unit_system = registry.ds.unit_system
 
-    if (ftype,"magnetic_field_x") not in registry:
+    axis_names = registry.ds.coordinates.axis_order
+
+    if (ftype,"magnetic_field_%s" % axis_names[0]) not in registry:
         return
 
-    u = registry[ftype,"magnetic_field_x"].units
+    u = registry[ftype,"magnetic_field_%s" % axis_names[0]].units
 
     def _magnetic_field_strength(field,data):
-        B2 = (data[ftype,"magnetic_field_x"]**2 +
-              data[ftype,"magnetic_field_y"]**2 +
-              data[ftype,"magnetic_field_z"]**2)
+        B2 = (data[ftype,"magnetic_field_%s" % axis_names[0]]**2 +
+              data[ftype,"magnetic_field_%s" % axis_names[1]]**2 +
+              data[ftype,"magnetic_field_%s" % axis_names[2]]**2)
         return np.sqrt(B2)
     registry.add_field((ftype,"magnetic_field_strength"), sampling_type="cell", 
                        function=_magnetic_field_strength,
@@ -69,37 +71,63 @@
              function=_magnetic_pressure,
              units=unit_system["pressure"])
 
-    def _magnetic_field_poloidal(field,data):
-        normal = data.get_field_parameter("normal")
-        d = data[ftype,'magnetic_field_x']
-        Bfields = data.ds.arr(
-                    [data[ftype,'magnetic_field_x'],
-                     data[ftype,'magnetic_field_y'],
-                     data[ftype,'magnetic_field_z']],
-                     d.units)
+    if registry.ds.geometry == "cartesian":
+        def _magnetic_field_poloidal(field,data):
+            normal = data.get_field_parameter("normal")
+            d = data[ftype,'magnetic_field_x']
+            Bfields = data.ds.arr(
+                        [data[ftype,'magnetic_field_x'],
+                         data[ftype,'magnetic_field_y'],
+                         data[ftype,'magnetic_field_z']],
+                         d.units)
 
-        theta = data["index", 'spherical_theta']
-        phi   = data["index", 'spherical_phi']
+            theta = data["index", 'spherical_theta']
+            phi   = data["index", 'spherical_phi']
 
-        return get_sph_theta_component(Bfields, theta, phi, normal)
+            return get_sph_theta_component(Bfields, theta, phi, normal)
+
+        def _magnetic_field_toroidal(field,data):
+            normal = data.get_field_parameter("normal")
+            d = data[ftype,'magnetic_field_x']
+            Bfields = data.ds.arr(
+                        [data[ftype,'magnetic_field_x'],
+                         data[ftype,'magnetic_field_y'],
+                         data[ftype,'magnetic_field_z']],
+                         d.units)
+
+            phi = data["index", 'spherical_phi']
+            return get_sph_phi_component(Bfields, phi, normal)
+
+    elif registry.ds.geometry == "cylindrical":
+        def _magnetic_field_poloidal(field, data):
+            r = data["index", "r"]
+            z = data["index", "z"]
+            d = np.sqrt(r*r+z*z)
+            return (data[ftype, "magnetic_field_r"]*(r/d) +
+                    data[ftype, "magnetic_field_z"]*(z/d))
+
+        def _magnetic_field_toroidal(field, data):
+            return data[ftype,"magnetic_field_theta"]
+
+    elif registry.ds.geometry == "spherical":
+        def _magnetic_field_poloidal(field, data):
+            return data[ftype,"magnetic_field_theta"]
+
+        def _magnetic_field_toroidal(field, data):
+            return data[ftype,"magnetic_field_phi"]
+
+    else:
+
+        # Unidentified geometry--set to None
+
+        _magnetic_field_toroidal = None
+        _magnetic_field_poloidal = None
 
     registry.add_field((ftype, "magnetic_field_poloidal"), sampling_type="cell", 
              function=_magnetic_field_poloidal,
              units=u, validators=[ValidateParameter("normal")])
 
-    def _magnetic_field_toroidal(field,data):
-        normal = data.get_field_parameter("normal")
-        d = data[ftype,'magnetic_field_x']
-        Bfields = data.ds.arr(
-                    [data[ftype,'magnetic_field_x'],
-                     data[ftype,'magnetic_field_y'],
-                     data[ftype,'magnetic_field_z']],
-                     d.units)
-        
-        phi = data["index", 'spherical_phi']
-        return get_sph_phi_component(Bfields, phi, normal)
-
-    registry.add_field((ftype, "magnetic_field_toroidal"), sampling_type="cell", 
+    registry.add_field((ftype, "magnetic_field_toroidal"), sampling_type="cell",
              function=_magnetic_field_toroidal,
              units=u, validators=[ValidateParameter("normal")])
 
@@ -160,7 +188,7 @@
         def _mag_field(field, data):
             return convert(data[fd])
         return _mag_field
-    for ax, fd in zip("xyz", ds_fields):
+    for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields):
         registry.add_field((ftype,"magnetic_field_%s" % ax), sampling_type="cell", 
                            function=mag_field(fd),
                            units=unit_system[to_units.dimensions])
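
In the cylindrical branch above, the poloidal component is the projection of
(B_r, B_z) onto the position vector in the r-z plane, and the azimuthal field
is toroidal by definition. A standalone numerical sketch of that decomposition
(made-up point values, not yt API calls):

    import numpy as np

    r, z = 3.0, 4.0                     # cylindrical coordinates of a cell
    B_r, B_z, B_theta = 1.0, 2.0, 0.5   # field components, arbitrary units
    d = np.sqrt(r*r + z*z)              # distance from the origin
    B_poloidal = B_r*(r/d) + B_z*(z/d)  # as in _magnetic_field_poloidal
    B_toroidal = B_theta                # as in _magnetic_field_toroidal
    print(B_poloidal, B_toroidal)       # 2.2 0.5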

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -20,6 +20,7 @@
     'art',
     'artio',
     'athena',
+    'athena_pp',
     'boxlib',
     'chombo',
     'eagle',

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -60,29 +60,17 @@
                 self.add_field(("gas","velocity_%s" % comp), sampling_type="cell",
                                function=velocity_field(comp), units = unit_system["velocity"])
         # Add pressure, energy, and temperature fields
-        def ekin1(data):
-            return 0.5*(data["athena","momentum_x"]**2 +
-                        data["athena","momentum_y"]**2 +
-                        data["athena","momentum_z"]**2)/data["athena","density"]
-        def ekin2(data):
-            return 0.5*(data["athena","velocity_x"]**2 +
-                        data["athena","velocity_y"]**2 +
-                        data["athena","velocity_z"]**2)*data["athena","density"]
-        def emag(data):
-            return 0.5*(data["cell_centered_B_x"]**2 +
-                        data["cell_centered_B_y"]**2 +
-                        data["cell_centered_B_z"]**2)
         def eint_from_etot(data):
             eint = data["athena","total_energy"]
-            eint -= ekin1(data)
+            eint -= data["gas","kinetic_energy"]
             if ("athena","cell_centered_B_x") in self.field_list:
-                eint -= emag(data)
+                eint -= data["gas","magnetic_energy"]
             return eint
         def etot_from_pres(data):
             etot = data["athena","pressure"]/(data.ds.gamma-1.)
-            etot += ekin2(data)
+            etot += data["gas", "kinetic_energy"]
             if ("athena","cell_centered_B_x") in self.field_list:
-                etot += emag(data)
+                etot += data["gas", "magnetic_energy"]
             return etot
         if ("athena","pressure") in self.field_list:
             self.add_output_field(("athena","pressure"), sampling_type="cell",
@@ -103,10 +91,6 @@
         elif ("athena","total_energy") in self.field_list:
             self.add_output_field(("athena","total_energy"), sampling_type="cell",
                                   units=pres_units)
-            def _pressure(field, data):
-                return eint_from_etot(data)*(data.ds.gamma-1.0)
-            self.add_field(("gas","pressure"), sampling_type="cell",  function=_pressure,
-                           units=unit_system["pressure"])
             def _thermal_energy(field, data):
                 return eint_from_etot(data)/data["athena","density"]
             self.add_field(("gas","thermal_energy"), sampling_type="cell",
@@ -117,7 +101,7 @@
             self.add_field(("gas","total_energy"), sampling_type="cell",
                            function=_total_energy,
                            units=unit_system["specific_energy"])
-
+        # Add temperature field
         def _temperature(field, data):
             if data.has_field_parameter("mu"):
                 mu = data.get_field_parameter("mu")
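
The refactor above replaces the hand-rolled ekin/emag helpers with yt's derived
("gas","kinetic_energy") and ("gas","magnetic_energy") fields, both energy
densities. A scalar sanity check of the bookkeeping (hypothetical code-unit
values, assuming the 0.5*B**2 magnetic-energy convention):

    density, velocity, B = 2.0, 3.0, 1.0
    total_energy = 10.0
    kinetic_energy = 0.5*density*velocity**2   # 9.0
    magnetic_energy = 0.5*B**2                 # 0.5
    eint = total_energy - kinetic_energy - magnetic_energy
    print(eint)                                # 0.5, as in eint_from_etot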

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/api.py
--- /dev/null
+++ b/yt/frontends/athena_pp/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.athena++
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from .data_structures import \
+      AthenaPPGrid, \
+      AthenaPPHierarchy, \
+      AthenaPPDataset
+
+from .fields import \
+      AthenaPPFieldInfo
+
+from .io import \
+      IOHandlerAthenaPP
+
+from . import tests

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -0,0 +1,360 @@
+"""
+Data structures for Athena++.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import os
+import weakref
+
+from yt.funcs import \
+    mylog, get_pbar, \
+    ensure_tuple
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.data_objects.static_output import \
+    Dataset
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.utilities.file_handler import \
+    HDF5FileHandler
+from yt.geometry.unstructured_mesh_handler import \
+    UnstructuredIndex
+from yt.data_objects.unstructured_mesh import \
+    SemiStructuredMesh
+from itertools import chain, product
+from .fields import AthenaPPFieldInfo
+
+geom_map = {"cartesian": "cartesian",
+            "cylindrical": "cylindrical",
+            "spherical_polar": "spherical",
+            "minkowski": "cartesian",
+            "tilted": "cartesian",
+            "sinusoidal": "cartesian",
+            "schwarzschild": "spherical",
+            "kerr-schild": "spherical"}
+
+_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                   dtype=np.int64, count = 8*3)
+_cis.shape = (8, 3)
+
+class AthenaPPLogarithmicMesh(SemiStructuredMesh):
+    _index_offset = 0
+
+    def __init__(self, mesh_id, filename, connectivity_indices,
+                 connectivity_coords, index, blocks, dims):
+        super(AthenaPPLogarithmicMesh, self).__init__(mesh_id, filename, 
+                                                      connectivity_indices,
+                                                      connectivity_coords, index)
+        self.mesh_blocks = blocks
+        self.mesh_dims = dims
+
+class AthenaPPLogarithmicIndex(UnstructuredIndex):
+    def __init__(self, ds, dataset_type = 'athena_pp'):
+        self._handle = ds._handle
+        super(AthenaPPLogarithmicIndex, self).__init__(ds, dataset_type)
+        self.index_filename = self.dataset.filename
+        self.directory = os.path.dirname(self.dataset.filename)
+        self.dataset_type = dataset_type
+
+    def _initialize_mesh(self):
+        mylog.debug("Setting up meshes.")
+        num_blocks = self._handle.attrs["NumMeshBlocks"]
+        log_loc = self._handle['LogicalLocations']
+        levels = self._handle["Levels"]
+        x1f = self._handle["x1f"]
+        x2f = self._handle["x2f"]
+        x3f = self._handle["x3f"]
+        nbx, nby, nbz = tuple(np.max(log_loc, axis=0)+1)
+        nlevel = self._handle.attrs["MaxLevel"]+1
+
+        nb = np.array([nbx, nby, nbz], dtype='int')
+        self.mesh_factors = np.ones(3, dtype='int')*((nb > 1).astype("int")+1)
+
+        block_grid = -np.ones((nbx,nby,nbz,nlevel), dtype=np.int)
+        block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2],levels[:]] = np.arange(num_blocks)
+
+        block_list = np.arange(num_blocks, dtype='int64')
+        bc = []
+        for i in range(num_blocks):
+            if block_list[i] >= 0:
+                ii, jj, kk = log_loc[i]
+                neigh = block_grid[ii:ii+2,jj:jj+2,kk:kk+2,levels[i]]
+                if np.all(neigh > -1):
+                    loc_ids = neigh.transpose().flatten()
+                    bc.append(loc_ids)
+                    block_list[loc_ids] = -1
+                else:
+                    bc.append(np.array(i))
+                    block_list[i] = -1
+
+        num_meshes = len(bc)
+
+        self.meshes = []
+        pbar = get_pbar("Constructing meshes", num_meshes)
+        for i in range(num_meshes):
+            ob = bc[i][0]
+            x = x1f[ob,:]
+            y = x2f[ob,:]
+            z = x3f[ob,:]
+            if nbx > 1:
+                x = np.concatenate([x, x1f[bc[i][1],1:]])
+            if nby > 1:
+                y = np.concatenate([y, x2f[bc[i][2],1:]])
+            if nbz > 1:
+                z = np.concatenate([z, x3f[bc[i][4],1:]])
+            nxm = x.size
+            nym = y.size
+            nzm = z.size
+            coords = np.zeros((nxm, nym, nzm, 3), dtype="float64", order="C")
+            coords[:,:,:,0] = x[:,None,None]
+            coords[:,:,:,1] = y[None,:,None]
+            coords[:,:,:,2] = z[None,None,:]
+            coords.shape = (nxm * nym * nzm, 3)
+            cycle = np.rollaxis(np.indices((nxm-1,nym-1,nzm-1)), 0, 4)
+            cycle.shape = ((nxm-1)*(nym-1)*(nzm-1), 3)
+            off = _cis + cycle[:, np.newaxis]
+            connectivity = ((off[:,:,0] * nym) + off[:,:,1]) * nzm + off[:,:,2]
+            mesh = AthenaPPLogarithmicMesh(i, self.index_filename, connectivity,
+                                           coords, self, bc[i],
+                                           np.array([nxm-1, nym-1, nzm-1]))
+            self.meshes.append(mesh)
+            pbar.update(i)
+        pbar.finish()
+        mylog.debug("Done setting up meshes.")
+
+    def _detect_output_fields(self):
+        self.field_list = [("athena_pp", k) for k in self.ds._field_map]
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
+class AthenaPPGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, id, index, level):
+        AMRGridPatch.__init__(self, id, filename = index.index_filename,
+                              index = index)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        LE, RE = self.index.grid_left_edge[id,:], \
+            self.index.grid_right_edge[id,:]
+        self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
+        if self.ds.dimensionality < 2: self.dds[1] = 1.0
+        if self.ds.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "AthenaPPGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class AthenaPPHierarchy(GridIndex):
+
+    grid = AthenaPPGrid
+    _dataset_type='athena_pp'
+    _data_file = None
+
+    def __init__(self, ds, dataset_type='athena_pp'):
+        self.dataset = weakref.proxy(ds)
+        self.directory = os.path.dirname(self.dataset.filename)
+        self.dataset_type = dataset_type
+        # for now, the index file is the dataset!
+        self.index_filename = self.dataset.filename
+        self._handle = ds._handle
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _detect_output_fields(self):
+        self.field_list = [("athena_pp", k) for k in self.dataset._field_map]
+
+    def _count_grids(self):
+        self.num_grids = self._handle.attrs["NumMeshBlocks"]
+
+    def _parse_index(self):
+        num_grids = self._handle.attrs["NumMeshBlocks"]
+
+        self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
+
+        for i in range(num_grids):
+            x = self._handle["x1f"][i,:]
+            y = self._handle["x2f"][i,:]
+            z = self._handle["x3f"][i,:]
+            self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
+            self.grid_right_edge[i] = np.array([x[-1], y[-1], z[-1]], dtype='float64')
+            self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
+        levels = self._handle["Levels"][:]
+
+        self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
+        self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
+
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in range(num_grids):
+            self.grids[i] = self.grid(i, self, levels[i])
+
+        if self.dataset.dimensionality <= 2:
+            self.grid_right_edge[:,2] = self.dataset.domain_right_edge[2]
+        if self.dataset.dimensionality == 1:
+            self.grid_right_edge[:,1:] = self.dataset.domain_right_edge[1:]
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+        self.max_level = self._handle.attrs["MaxLevel"]
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
+class AthenaPPDataset(Dataset):
+    _field_info_class = AthenaPPFieldInfo
+    _dataset_type = "athena_pp"
+
+    def __init__(self, filename, dataset_type='athena_pp',
+                 storage_filename=None, parameters=None,
+                 units_override=None, unit_system="code"):
+        self.fluid_types += ("athena_pp",)
+        if parameters is None:
+            parameters = {}
+        self.specified_parameters = parameters
+        if units_override is None:
+            units_override = {}
+        self._handle = HDF5FileHandler(filename)
+        xrat = self._handle.attrs["RootGridX1"][2]
+        yrat = self._handle.attrs["RootGridX2"][2]
+        zrat = self._handle.attrs["RootGridX3"][2]
+        if xrat != 1.0 or yrat != 1.0 or zrat != 1.0:
+            self._index_class = AthenaPPLogarithmicIndex
+            self.logarithmic = True
+        else:
+            self._index_class = AthenaPPHierarchy
+            self.logarithmic = False
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.filename = filename
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+        self.backup_filename = self.filename[:-4] + "_backup.gdf"
+
+    def _set_code_unit_attributes(self):
+        """
+        Generates the conversion to various physical _units based on the
+        parameter file
+        """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
+                          ("temperature", "K")]:
+            # We set these to cgs for now, but they may have been overridden
+            if getattr(self, unit+'_unit', None) is not None:
+                continue
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
+
+    def set_code_units(self):
+        super(AthenaPPDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+        temp_unit = getattr(self, "temperature_unit", None)
+        if temp_unit is None:
+            self.temperature_unit = self.quan(1.0, "K")
+        self.velocity_unit = self.length_unit / self.time_unit
+
+    def _parse_parameter_file(self):
+
+        xmin, xmax = self._handle.attrs["RootGridX1"][:2]
+        ymin, ymax = self._handle.attrs["RootGridX2"][:2]
+        zmin, zmax = self._handle.attrs["RootGridX3"][:2]
+
+        self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
+        self.domain_right_edge = np.array([xmax, ymax, zmax], dtype='float64')
+
+        self.geometry = geom_map[self._handle.attrs["Coordinates"].decode('utf-8')]
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self._handle.attrs["RootGridSize"]
+
+        self._field_map = {}
+        k = 0
+        for (i, dname), num_var in zip(enumerate(self._handle.attrs["DatasetNames"]),
+                                       self._handle.attrs["NumVariables"]):
+            for j in range(num_var):
+                fname = self._handle.attrs["VariableNames"][k].decode("ascii","ignore")
+                self._field_map[fname] = (dname.decode("ascii","ignore"), j)
+                k += 1
+
+        self.refine_by = 2
+        dimensionality = 3
+        if self.domain_dimensions[2] == 1:
+            dimensionality = 2
+        if self.domain_dimensions[1] == 1:
+            dimensionality = 1
+        self.dimensionality = dimensionality
+        self.current_time = self._handle.attrs["Time"]
+        self.unique_identifier = self.parameter_filename.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+        if 'periodicity' in self.specified_parameters:
+            self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])
+        else:
+            self.periodicity = (True,True,True,)
+        if 'gamma' in self.specified_parameters:
+            self.gamma = float(self.specified_parameters['gamma'])
+        else:
+            self.gamma = 5./3.
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        if "gamma" in self.specified_parameters:
+            self.parameters["Gamma"] = self.specified_parameters["gamma"]
+        else:
+            self.parameters["Gamma"] = 5./3.
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if args[0].endswith('athdf'):
+                return True
+        except:
+            pass
+        return False
+
+    @property
+    def _skip_cache(self):
+        return True
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
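
The frontend drives everything off HDF5 attributes of the .athdf file. A quick
peek with h5py at the attributes consumed above (filename taken from the docs;
purely illustrative):

    import h5py

    with h5py.File("AM06.out1.00400.athdf", "r") as f:
        print(f.attrs["NumMeshBlocks"], f.attrs["MaxLevel"])
        print(f.attrs["RootGridX1"])   # [xmin, xmax, xrat]; xrat != 1.0 selects the logarithmic index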

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/definitions.py
--- /dev/null
+++ b/yt/frontends/athena_pp/definitions.py
@@ -0,0 +1,14 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/fields.py
--- /dev/null
+++ b/yt/frontends/athena_pp/fields.py
@@ -0,0 +1,91 @@
+"""
+Athena++-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.utilities.physical_constants import \
+    kboltz, mh
+
+b_units = "code_magnetic"
+pres_units = "code_mass/(code_length*code_time**2)"
+rho_units = "code_mass / code_length**3"
+vel_units = "code_length / code_time"
+
+def velocity_field(j):
+    def _velocity(field, data):
+        return data["athena_pp", "mom%d" % j]/data["athena_pp","dens"]
+    return _velocity
+
+class AthenaPPFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        ("rho", (rho_units, ["density"], None)),
+        ("dens", (rho_units, ["density"], None)),
+        ("Bcc1", (b_units, [], None)),
+        ("Bcc2", (b_units, [], None)),
+        ("Bcc3", (b_units, [], None)),
+    )
+
+    def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
+        unit_system = self.ds.unit_system
+        # Add velocity fields
+        vel_prefix = "velocity"
+        for i, comp in enumerate(self.ds.coordinates.axis_order):
+            vel_field = ("athena_pp", "vel%d" % (i+1))
+            mom_field = ("athena_pp", "mom%d" % (i+1))
+            if vel_field in self.field_list:
+                self.add_output_field(vel_field, sampling_type="cell", units="code_length/code_time")
+                self.alias(("gas","%s_%s" % (vel_prefix, comp)), vel_field,
+                           units=unit_system["velocity"])
+            elif mom_field in self.field_list:
+                self.add_output_field(mom_field, sampling_type="cell",
+                                      units="code_mass/code_time/code_length**2")
+                self.add_field(("gas","%s_%s" % (vel_prefix, comp)), sampling_type="cell",
+                               function=velocity_field(i+1), units=unit_system["velocity"])
+        # Figure out thermal energy field
+        if ("athena_pp","press") in self.field_list:
+            self.add_output_field(("athena_pp","press"), sampling_type="cell",
+                                  units=pres_units)
+            self.alias(("gas","pressure"),("athena_pp","press"),
+                       units=unit_system["pressure"])
+            def _thermal_energy(field, data):
+                return data["athena_pp","press"] / \
+                       (data.ds.gamma-1.)/data["athena_pp","rho"]
+            self.add_field(("gas","thermal_energy"), sampling_type="cell",
+                           function=_thermal_energy,
+                           units=unit_system["specific_energy"])
+        elif ("athena_pp","Etot") in self.field_list:
+            self.add_output_field(("athena_pp","Etot"), sampling_type="cell",
+                                  units=pres_units)
+            def _thermal_energy(field, data):
+                eint = data["athena_pp", "Etot"] - data["gas","kinetic_energy"]
+                if ("athena_pp", "B1") in self.field_list:
+                    eint -= data["gas","magnetic_energy"]
+                return eint/data["athena_pp","dens"]
+            self.add_field(("gas","thermal_energy"), sampling_type="cell",
+                           function=_thermal_energy,
+                           units=unit_system["specific_energy"])
+        # Add temperature field
+        def _temperature(field, data):
+            if data.has_field_parameter("mu"):
+                mu = data.get_field_parameter("mu")
+            else:
+                mu = 0.6
+            return (data["gas","pressure"]/data["gas","density"])*mu*mh/kboltz
+        self.add_field(("gas", "temperature"), sampling_type="cell", function=_temperature,
+                       units=unit_system["temperature"])
+
+        setup_magnetic_field_aliases(self, "athena_pp", ["Bcc%d" % ax for ax in (1,2,3)])
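
The velocity_field(j) factory above exists to bind the component index at
definition time. A minimal illustration of the late-binding pitfall it avoids
(generic Python, independent of the yt API):

    funcs = [lambda: i for i in range(3)]
    print([f() for f in funcs])   # [2, 2, 2] -- every closure sees the final i

    funcs = [(lambda j: (lambda: j))(i) for i in range(3)]
    print([f() for f in funcs])   # [0, 1, 2] -- each factory call captures its own j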

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/io.py
--- /dev/null
+++ b/yt/frontends/athena_pp/io.py
@@ -0,0 +1,100 @@
+"""
+Athena++-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from itertools import groupby
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+
+# http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
+def grid_sequences(grids):
+    g_iter = sorted(grids, key = lambda g: g.id)
+    for k, g in groupby(enumerate(g_iter), lambda i_x1:i_x1[0]-i_x1[1].id):
+        seq = list(v[1] for v in g)
+        yield seq
+
+ii = [0, 1, 0, 1, 0, 1, 0, 1]
+jj = [0, 0, 1, 1, 0, 0, 1, 1]
+kk = [0, 0, 0, 0, 1, 1, 1, 1]
+
+class IOHandlerAthenaPP(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type = "athena_pp"
+
+    def __init__(self, ds):
+        super(IOHandlerAthenaPP, self).__init__(ds)
+        self._handle = ds._handle
+
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "athena_pp" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        for field in fields:
+            # Always use *native* 64-bit float.
+            rv[field] = np.empty(size, dtype="=f8")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            dname, fdi = self.ds._field_map[fname]
+            ds = f["/%s" % dname]
+            ind = 0
+            for chunk in chunks:
+                if self.ds.logarithmic:
+                    for mesh in chunk.objs:
+                        nx, ny, nz = mesh.mesh_dims // self.ds.index.mesh_factors
+                        data = np.empty(mesh.mesh_dims, dtype="=f8")
+                        for n, id in enumerate(mesh.mesh_blocks):
+                            data[ii[n]*nx:(ii[n]+1)*nx,jj[n]*ny:(jj[n]+1)*ny,kk[n]*nz:(kk[n]+1)*nz] = \
+                                 ds[fdi,id,:,:,:].transpose()
+                        ind += mesh.select(selector, data, rv[field], ind)  # caches
+                else:
+                    for gs in grid_sequences(chunk.objs):
+                        start = gs[0].id - gs[0]._id_offset
+                        end = gs[-1].id - gs[-1]._id_offset + 1
+                        data = ds[fdi,start:end,:,:,:].transpose()
+                        for i, g in enumerate(gs):
+                            ind += g.select(selector, data[...,i], rv[field], ind)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        if self.ds.logarithmic:
+            pass
+        f = self._handle
+        rv = {}
+        for g in chunk.objs:
+            rv[g.id] = {}
+        if len(fields) == 0:
+            return rv
+        for field in fields:
+            ftype, fname = field
+            dname, fdi = self.ds._field_map[fname]
+            ds = f["/%s" % dname]
+            for gs in grid_sequences(chunk.objs):
+                start = gs[0].id - gs[0]._id_offset
+                end = gs[-1].id - gs[-1]._id_offset + 1
+                data = ds[fdi,start:end,:,:,:].transpose()
+                for i, g in enumerate(gs):
+                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
+        return rv
\ No newline at end of file
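
grid_sequences above groups grids with consecutive ids so that each run can be
read with a single HDF5 slice; the groupby key (enumeration index minus id) is
constant exactly within such a run. A self-contained demonstration with a dummy
grid class (not the yt one):

    from itertools import groupby

    class Grid:                   # hypothetical stand-in for a yt grid
        def __init__(self, id):
            self.id = id

    def grid_sequences(grids):
        g_iter = sorted(grids, key=lambda g: g.id)
        for k, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] - i_x1[1].id):
            yield [v[1] for v in g]

    grids = [Grid(i) for i in (0, 1, 2, 5, 6, 9)]
    print([[g.id for g in seq] for seq in grid_sequences(grids)])
    # [[0, 1, 2], [5, 6], [9]]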

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/frontends/athena_pp/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena_pp/tests/test_outputs.py
@@ -0,0 +1,79 @@
+"""
+Athena++ frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check, \
+    assert_allclose
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load, \
+    GenericArrayTest
+from yt.frontends.athena_pp.api import AthenaPPDataset
+from yt.convenience import load
+
+_fields_disk = ("density", "velocity_r")
+
+disk = "KeplerianDisk/disk.out1.00000.athdf"
+@requires_ds(disk)
+def test_disk():
+    ds = data_dir_load(disk)
+    assert_equal(str(ds), "disk.out1.00000")
+    dd = ds.all_data()
+    vol = (ds.domain_right_edge[0]**3-ds.domain_left_edge[0]**3)/3.0
+    vol *= np.cos(ds.domain_left_edge[1])-np.cos(ds.domain_right_edge[1])
+    vol *= ds.domain_right_edge[2].v-ds.domain_left_edge[2].v
+    assert_allclose(dd.quantities.total_quantity("cell_volume"), vol)
+    for field in _fields_disk:
+        def field_func(name):
+            return dd[field]
+        yield GenericArrayTest(ds, field_func, args=[field])
+
+_fields_AM06 = ("temperature", "density", "velocity_magnitude", "magnetic_field_x")
+
+AM06 = "AM06/AM06.out1.00400.athdf"
+@requires_ds(AM06)
+def test_AM06():
+    ds = data_dir_load(AM06)
+    assert_equal(str(ds), "AM06.out1.00400")
+    for test in small_patch_amr(ds, _fields_AM06):
+        test_AM06.__name__ = test.description
+        yield test
+
+uo_AM06 = {
+    'length_unit': (1.0, 'kpc'),
+    'mass_unit': (1.0, 'Msun'),
+    'time_unit': (1.0, 'Myr'),
+}
+
+@requires_file(AM06)
+def test_AM06_override():
+    # verify that overriding units causes derived unit values to be updated.
+    # see issue #1259
+    ds = load(AM06, units_override=uo_AM06)
+    assert_equal(float(ds.magnetic_unit.in_units('gauss')),
+                 9.01735778342523e-08)
+
+@requires_file(AM06)
+def test_units_override():
+    for test in units_override_check(AM06):
+        yield test
+
+@requires_file(AM06)
+def test_AthenaPPDataset():
+    assert isinstance(data_dir_load(AM06), AthenaPPDataset)
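
The expected value in test_AM06_override follows from the Gaussian magnetic
unit defined in set_code_units, B = sqrt(4*pi*M/(T**2*L)). A rough
back-of-the-envelope check (approximate constants, so only the leading digits
agree):

    import numpy as np

    Msun = 1.98892e33   # g
    kpc = 3.0857e21     # cm
    Myr = 3.15576e13    # s
    print(np.sqrt(4*np.pi*Msun/(Myr**2*kpc)))   # ~9.017e-08, i.e. gauss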

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -108,7 +108,7 @@
         ad = ds.all_data()
         dens = ad['density']
         magx = ad['magx']
-        magnetic_field_x = ad['magnetic_field_x']
+        magnetic_field_x = ad['magnetic_field_r']
 
         if us == 'cgs':
             assert str(dens.units) == 'g/cm**3'

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1414,6 +1414,15 @@
         halo catalog to add text to the plot near the halo.
         Example: annotate_field = 'particle_mass' will
         write the halo mass next to each halo.
+    radius_field: Accepts a field contained in the halo
+        catalog to set the radius of the circle which will
+        surround each halo.
+    center_field_prefix: Accepts a field prefix which will
+        be used to find the fields containing the coordinates
+        of the center of each halo. Ex: 'particle_position'
+        will result in the fields 'particle_position_x' for x,
+        'particle_position_y' for y, and 'particle_position_z' 
+        for z.
     text_args: Contains the arguments controlling the text
         appearance of the annotated field.
     factor: A number the virial radius is multiplied by for
@@ -1427,14 +1436,17 @@
     _supported_geometries = ("cartesian", "spectral_cube")
 
     def __init__(self, halo_catalog, circle_args=None, circle_kwargs=None,
-                 width=None, annotate_field=None, text_args=None,
-                 font_kwargs=None, factor=1.0):
+                 width=None, annotate_field=None, radius_field='virial_radius',
+                 center_field_prefix="particle_position",
+                 text_args=None, font_kwargs=None, factor=1.0):
 
         PlotCallback.__init__(self)
         def_circle_args = {'edgecolor':'white', 'facecolor':'None'}
         def_text_args = {'color':'white'}
         self.halo_catalog = halo_catalog
         self.width = width
+        self.radius_field = radius_field
+        self.center_field_prefix = center_field_prefix
         self.annotate_field = annotate_field
         if circle_kwargs is not None:
             circle_args = circle_kwargs
@@ -1462,9 +1474,9 @@
         axis_names = plot.data.ds.coordinates.axis_name
         xax = plot.data.ds.coordinates.x_axis[data.axis]
         yax = plot.data.ds.coordinates.y_axis[data.axis]
-        field_x = "particle_position_%s" % axis_names[xax]
-        field_y = "particle_position_%s" % axis_names[yax]
-        field_z = "particle_position_%s" % axis_names[data.axis]
+        field_x = "%s_%s" % (self.center_field_prefix, axis_names[xax])
+        field_y = "%s_%s" % (self.center_field_prefix, axis_names[yax])
+        field_z = "%s_%s" % (self.center_field_prefix, axis_names[data.axis])
 
         # Set up scales for pixel size and original data
         pixel_scale = self.pixel_scale(plot)[0]
@@ -1478,7 +1490,7 @@
         px, py = self.convert_to_plot(plot,[px,py])
 
         # Convert halo radii to a radius in pixels
-        radius = halo_data['virial_radius'][:].in_units(units)
+        radius = halo_data[self.radius_field][:].in_units(units)
         radius = np.array(radius*pixel_scale*self.factor/data_scale)
 
         if self.width:

diff -r 85b778b06f5ea0b386d136407b567091ec355c22 -r 01c9466b19e469788dac9e775775449c4b969b56 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1283,9 +1283,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
-    data_source : YTSelectionContainer Object
-         Object to be used for data selection.  Defaults to a region covering
-         the entire simulation.
+    data_source: YTSelectionContainer object
+         Object to be used for data selection. Defaults to ds.all_data(), a 
+         region covering the full domain.
 
     Examples
     --------
@@ -1314,6 +1314,10 @@
         if field_parameters is None:
             field_parameters = {}
 
+        if ds.geometry == "spherical" or ds.geometry == "cylindrical":
+            mylog.info("Setting origin='native' for %s geometry." % ds.geometry)
+            origin = 'native'
+
         if isinstance(ds, YTSpatialPlotDataset):
             slc = ds.all_data()
             slc.axis = axis
@@ -1460,6 +1464,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: YTSelectionContainer object
+         Object to be used for data selection. Defaults to ds.all_data(), a 
+         region covering the full domain.
 
     Examples
     --------
@@ -1587,8 +1594,8 @@
          A dictionary of field parameters than can be accessed by derived
          fields.
     data_source : YTSelectionContainer Object
-         Object to be used for data selection.  Defaults to a region covering
-         the entire simulation.
+         Object to be used for data selection.  Defaults to ds.all_data(), a
+         region covering the full domain.
     """
 
     _plot_type = 'OffAxisSlice'
@@ -1626,13 +1633,17 @@
     def __init__(self, center, ds, normal_vector, width, fields,
                  interpolated, resolution = (800,800), weight=None,
                  volume=None, no_ghost=False, le=None, re=None,
-                 north_vector=None, method="integrate"):
+                 north_vector=None, method="integrate",
+                 data_source=None):
         self.center = center
         self.ds = ds
         self.axis = 4 # always true for oblique data objects
         self.normal_vector = normal_vector
         self.width = width
-        self.dd = ds.all_data()
+        if data_source is None:
+            self.dd = ds.all_data()
+        else:
+            self.dd = data_source
         fields = self.dd._determine_fields(fields)
         self.fields = fields
         self.interpolated = interpolated
@@ -1737,6 +1748,9 @@
          just a straight summation of the field along the given axis. WARNING:
          This should only be used for uniform resolution grid datasets, as other
          datasets may result in unphysical images.
+    data_source : YTSelectionContainer object
+         Object to be used for data selection. Defaults to ds.all_data(), a
+         region covering the full domain.
     """
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
@@ -1745,7 +1759,8 @@
                  depth=(1, '1'), axes_unit=None, weight_field=None,
                  max_level=None, north_vector=None, right_handed=True,
                  volume=None, no_ghost=False, le=None, re=None,
-                 interpolated=False, fontsize=18, method="integrate"):
+                 interpolated=False, fontsize=18, method="integrate",
+                 data_source=None):
         (bounds, center_rot) = \
           get_oblique_window_parameters(normal,center,width,ds,depth=depth)
         fields = ensure_list(fields)[:]
@@ -1755,7 +1770,8 @@
         OffAxisProj = OffAxisProjectionDummyDataSource(
             center_rot, ds, normal, oap_width, fields, interpolated,
             weight=weight_field,  volume=volume, no_ghost=no_ghost,
-            le=le, re=re, north_vector=north_vector, method=method)
+            le=le, re=re, north_vector=north_vector, method=method,
+            data_source=data_source)
 
         validate_mesh_fields(OffAxisProj, fields)
 

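A hedged sketch of what the new data_source keyword enables for off-axis
projections; the dataset name is a placeholder:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder
    sp = ds.sphere("c", (0.2, "unitary"))

    # Before this change the dummy data source always used ds.all_data();
    # now a selection object can restrict which cells are projected.
    p = yt.OffAxisProjectionPlot(ds, [1, 1, 0], "density",
                                 width=(0.4, "unitary"), data_source=sp)
    p.save()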

https://bitbucket.org/yt_analysis/yt/commits/2cb4a8cf44e7/
Changeset:   2cb4a8cf44e7
Branch:      yt
User:        ngoldbaum
Date:        2017-01-31 17:26:59+00:00
Summary:     use units of plot to scale halo positions in annotate_halos
Affected #:  1 file

diff -r 01c9466b19e469788dac9e775775449c4b969b56 -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -152,11 +152,11 @@
 
         # We need a special case for when we are only given one coordinate.
         if ccoord.shape == (2,):
-            return (np.mod((ccoord[0]-x0)/(x1-x0),1.0)*(xx1-xx0) + xx0,
-                    np.mod((ccoord[1]-y0)/(y1-y0),1.0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0]-x0)/(x1-x0), 1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1]-y0)/(y1-y0), 1.0)*(yy1-yy0) + yy0)
         else:
-            return (np.mod((ccoord[0][:]-x0)/(x1-x0),1.0)*(xx1-xx0) + xx0,
-                    np.mod((ccoord[1][:]-y0)/(y1-y0),1.0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0][:]-x0)/(x1-x0), 1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1][:]-y0)/(y1-y0), 1.0)*(yy1-yy0) + yy0)
 
     def sanitize_coord_system(self, plot, coord, coord_system):
         """
@@ -1480,22 +1480,21 @@
 
         # Set up scales for pixel size and original data
         pixel_scale = self.pixel_scale(plot)[0]
-        data_scale = data.ds.length_unit
-        units = data_scale.units
+        units = plot.xlim[0].units
 
         # Convert halo positions to code units of the plotted data
         # and then to units of the plotted window
-        px = halo_data[field_x][:].in_units(units) / data_scale
-        py = halo_data[field_y][:].in_units(units) / data_scale
+        px = halo_data[field_x][:].in_units(units)
+        py = halo_data[field_y][:].in_units(units)
+
         px, py = self.convert_to_plot(plot,[px,py])
 
         # Convert halo radii to a radius in pixels
         radius = halo_data[self.radius_field][:].in_units(units)
-        radius = np.array(radius*pixel_scale*self.factor/data_scale)
+        radius = np.array(radius*pixel_scale*self.factor)
 
         if self.width:
-            pz = halo_data[field_z][:].in_units(units)/data_scale
-            pz = data.ds.arr(pz, 'code_length')
+            pz = halo_data[field_z][:].in_units('code_length')
             c = data.center[data.axis]
 
             # I should catch an error here if width isn't in this form

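The earlier hunk in this changeset only adjusts whitespace around the np.mod
wrapping; as a quick numpy illustration of what that wrapping does to
fractional window coordinates:

    import numpy as np

    # Fractional positions relative to the plot window.  Values outside
    # [0, 1) arise when 'center' shifts coordinates across a periodic edge.
    frac = np.array([-0.25, 0.1, 0.9, 1.3])

    # np.mod folds them back into [0, 1): approximately [0.75, 0.1, 0.9, 0.3]
    print(np.mod(frac, 1.0))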

https://bitbucket.org/yt_analysis/yt/commits/b312eec01230/
Changeset:   b312eec01230
Branch:      yt
User:        krafczyk
Date:        2017-01-31 17:43:15+00:00
Summary:     Merge
Affected #:  47 files

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/analyzing/XrayEmissionFields.ipynb
--- /dev/null
+++ b/doc/source/analyzing/XrayEmissionFields.ipynb
@@ -0,0 +1,220 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Note: If you came here trying to figure out how to create simulated X-ray photons and observations,\n",
+    "  you should go [here](analysis_modules/photon_simulator.html) instead."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This functionality provides the ability to create metallicity-dependent X-ray luminosity, emissivity, and photon emissivity fields for a given photon energy range.  This works by interpolating from emission tables created from the photoionization code [Cloudy](http://nublado.org/) or the collisional ionization database [AtomDB](http://www.atomdb.org). These can be downloaded from http://yt-project.org/data from the command line like so:\n",
+    "\n",
+    "`# Put the data in a directory you specify`  \n",
+    "`yt download cloudy_emissivity_v2.h5 /path/to/data`\n",
+    "\n",
+    "`# Put the data in the location set by \"supp_data_dir\"`  \n",
+    "`yt download apec_emissivity_v2.h5 supp_data_dir`\n",
+    "\n",
+    "The data path can be a directory on disk, or it can be \"supp_data_dir\", which will download the data to the directory specified by the `\"supp_data_dir\"` yt configuration entry. It is easiest to put these files in the directory from which you will be running yt or `\"supp_data_dir\"`, but see the note below about putting them in alternate locations."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Emission fields can be made for any energy interval between 0.1 keV and 100 keV, and will always be created for luminosity $(\\rm{erg~s^{-1}})$, emissivity $\\rm{(erg~s^{-1}~cm^{-3})}$, and photon emissivity $\\rm{(photons~s^{-1}~cm^{-3})}$.  The only required arguments are the\n",
+    "dataset object, and the minimum and maximum energies of the energy band. However, typically one needs to decide what will be used for the metallicity. This can either be a floating-point value representing a spatially constant metallicity, or a prescription for a metallicity field, e.g. `(\"gas\", \"metallicity\")`. For this first example, where the dataset has no metallicity field, we'll just assume $Z = 0.3~Z_\\odot$ everywhere:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "import yt\n",
+    "\n",
+    "ds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n",
+    "\n",
+    "xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0, table_type='apec', metallicity=0.3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Note: If you place the HDF5 emissivity tables in a location other than the current working directory or the location \n",
+    "  specified by the \"supp_data_dir\" configuration value, you will need to specify it in the call to \n",
+    "  `add_xray_emissivity_field`:  \n",
+    "  `xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0, data_dir=\"/path/to/data\", table_type='apec', metallicity=0.3)`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Having made the fields, one can see which fields were made:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (xray_fields)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The luminosity field is useful for summing up in regions like this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sp = ds.sphere(\"c\", (2.0, \"Mpc\"))\n",
+    "print (sp.quantities.total_quantity(\"xray_luminosity_0.5_7.0_keV\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Whereas the emissivity fields may be useful in derived fields or for plotting:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds, 'z', ['xray_emissivity_0.5_7.0_keV','xray_photon_emissivity_0.5_7.0_keV'],\n",
+    "                   width=(0.75, \"Mpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The emissivity and the luminosity fields take the values one would see in the frame of the source. However, if one wishes to make projections of the X-ray emission from a cosmologically distant object, the energy band will be redshifted. For this case, one can supply a `redshift` parameter and a `Cosmology` object (either from the dataset or one made on your own) to compute X-ray intensity fields along with the emissivity and luminosity fields.\n",
+    "\n",
+    "This example shows how to do that, Where we also use a spatially dependent metallicity field and the Cloudy tables instead of the APEC tables we used previously:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "ds2 = yt.load(\"D9p_500/10MpcBox_HartGal_csf_a0.500.d\")\n",
+    "\n",
+    "# In this case, use the redshift and cosmology from the dataset, \n",
+    "# but in theory you could put in something different\n",
+    "xray_fields2 = yt.add_xray_emissivity_field(ds2, 0.5, 2.0, redshift=ds2.current_redshift, cosmology=ds2.cosmology,\n",
+    "                                            metallicity=(\"gas\", \"metallicity\"), table_type='cloudy')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, one can see that two new fields have been added, corresponding to X-ray intensity / surface brightness when projected:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (xray_fields2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the intensity fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "prj = yt.ProjectionPlot(ds2, \"x\", [\"xray_intensity_0.5_2.0_keV\", \"xray_photon_intensity_0.5_2.0_keV\"],\n",
+    "                        center=\"max\", width=(40, \"kpc\"))\n",
+    "prj.set_zlim(\"xray_intensity_0.5_2.0_keV\", 1.0e-32, 5.0e-24)\n",
+    "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n",
+    "prj.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Warning: The X-ray fields depend on the number density of hydrogen atoms, given by the yt field\n",
+    "  `H_nuclei_density`. In the case of the APEC model, this assumes that all of the hydrogen in your\n",
+    "  dataset is ionized, whereas in the Cloudy model the ionization level is taken into account. If \n",
+    "  this field is not defined (either in the dataset or by the user), it will be constructed using\n",
+    "  abundance information from your dataset. Finally, if your dataset contains no abundance information,\n",
+    "  a primordial hydrogen mass fraction (X = 0.76) will be assumed."
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -3,6 +3,14 @@
 Constructing Mock X-ray Observations
 ------------------------------------
 
+.. warning:: 
+
+  The ``photon_simulator`` analysis module has been deprecated; it is
+  no longer being updated, and it will be removed in a future version
+  of yt. Users are encouraged to download and use the
+  `pyXSIM <http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim>`_ package 
+  instead. 
+
 .. note::
 
   If you just want to create derived fields for X-ray emission,

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-.. _xray_emission_fields:
-
-X-ray Emission Fields
-=====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>, John ZuHone <jzuhone at gmail.com>
-
-.. note::
-
-  If you came here trying to figure out how to create simulated X-ray photons and observations,
-  you should go `here <photon_simulator.html>`_ instead.
-
-This functionality provides the ability to create metallicity-dependent
-X-ray luminosity, emissivity, and photon emissivity fields for a given
-photon energy range.  This works by interpolating from emission tables
-created from the photoionization code `Cloudy <http://nublado.org/>`_ or
-the collisional ionization database `AtomDB <http://www.atomdb.org>`_. If
-you installed yt with the install script, these data files should be located in
-the *data* directory inside the installation directory, or can be downloaded
-from `<http://yt-project.org/data>`_. Emission fields can be made for any
-interval between 0.1 keV and 100 keV.
-
-Adding Emission Fields
-----------------------
-
-Fields will be created for luminosity :math:`{\rm (erg~s^{-1})}`, emissivity :math:`{\rm (erg~s^{-1}~cm^{-3})}`,
-and photon emissivity :math:`{\rm (photons~s^{-1}~cm^{-3})}`.  The only required arguments are the
-dataset object, and the minimum and maximum energies of the energy band.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_emissivity_field
-
-  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
-
-Additional keyword arguments are:
-
- * **filename** (*string*): Path to data file containing emissivity values. If None,
-   a file called "cloudy_emissivity.h5" is used, for photoionized plasmas. A second
-   option, for collisionally ionized plasmas, is in the file "apec_emissivity.h5",
-   available at http://yt-project.org/data. These files contain emissivity tables
-   for primordial elements and for metals at solar metallicity for the energy range
-   0.1 to 100 keV. Default: None.
-
- * **with_metals** (*bool*): If True, use the metallicity field to add the
-   contribution from metals.  If False, only the emission from H/He is
-   considered.  Default: True.
-
- * **constant_metallicity** (*float*): If specified, assume a constant
-   metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this. It should be given in unit of solar metallicity.
-   Default: None.
-
-The resulting fields can be used like all normal fields. The function will return the names of
-the created fields in a Python list.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_emissivity_field
-
-  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
-
-  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-  plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')
-  plot.save()
-  plot = yt.ProjectionPlot(ds, 'x', 'xray_emissivity_0.5_7.0_keV')
-  plot.save()
-  plot = yt.ProjectionPlot(ds, 'x', 'xray_photon_emissivity_0.5_7.0_keV')
-  plot.save()
-
-.. warning::
-
-  The X-ray fields depend on the number density of hydrogen atoms, in the yt field
-  ``H_number_density``. If this field is not defined (either in the dataset or by the user),
-  the primordial hydrogen mass fraction (X = 0.76) will be used to construct it.

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/analyzing/xray_emission_fields.rst
--- /dev/null
+++ b/doc/source/analyzing/xray_emission_fields.rst
@@ -0,0 +1,3 @@
+.. _xray_emission_fields:
+
+.. notebook:: XrayEmissionFields.ipynb

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -274,13 +274,13 @@
 BoxLib Data
 -----------
 
-yt has been tested with BoxLib data generated by Orion, Nyx, Maestro and
-Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
+yt has been tested with BoxLib data generated by Orion, Nyx, Maestro, Castro, 
+and WarpX. Currently it is cared for by a combination of Andrew Myers, Chris
 Malone, Matthew Turk, and Mike Zingale.
 
 To load a BoxLib dataset, you can use the ``yt.load`` command on
 the plotfile directory name.  In general, you must also have the
-``inputs`` file in the base directory, but Maestro and Castro will get
+``inputs`` file in the base directory, but Maestro, Castro, and WarpX will get
 all the necessary parameter information from the ``job_info`` file in
 the plotfile directory.  For instance, if you were in a
 directory with the following files:
@@ -308,7 +308,7 @@
    import yt
    ds = yt.load("pltgmlcs5600")
 
-For Maestro and Castro, you would not need the ``inputs`` file, and you
+For Maestro, Castro, and WarpX, you would not need the ``inputs`` file, and you
 would have a ``job_info`` file in the plotfile directory.
 
 .. rubric:: Caveats
@@ -316,7 +316,9 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt does not know about particles in Maestro.
+* yt supports BoxLib particle data stored in the standard format used 
+  by Nyx and WarpX. It currently does not support the ASCII particle
+  data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
   depending on the value of the ``use_tfromp`` runtime parameter.
 * For Maestro, some velocity fields like ``velocity_magnitude`` or

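As a hedged illustration of the new particle support described above (the
plotfile name is a placeholder, and exact particle type names depend on the
frontend; the generic yt particle fields are shown):

    import yt

    ds = yt.load("plt00000")   # a Nyx or WarpX plotfile, illustratively
    ad = ds.all_data()
    # Particle data in the standard BoxLib particle format is exposed
    # through the usual yt particle field interface.
    print(ad["all", "particle_position_x"])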
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -621,8 +621,8 @@
    :toctree: generated/
 
    ~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.EmissivityIntegrator
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_emissivity_field
+   ~yt.fields.xray_emission_fields.XrayEmissivityIntegrator
+   ~yt.fields.xray_emission_fields.add_xray_emissivity_field
 
 Absorption spectra fitting:
 

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -46,7 +46,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | openPMD               |     Y      |     Y     |      N     |   Y   |    Y     |    Y     |     N      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
@@ -62,6 +62,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Tipsy                 |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| WarpX                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 
 .. [#f1] one-dimensional base-state not read in currently.
 .. [#f2] These handle mesh fields using an in-memory octree that has not been parallelized.

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -441,7 +441,7 @@
    sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
    sl.save()
 
-Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
+Slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
 an example using another MOOSE dataset that uses triangular mesh elements:
 
@@ -452,6 +452,17 @@
    sl = yt.SlicePlot(ds, 2, ('connect1', 'nodal_aux'))
    sl.save()
 
+You may run into situations where you have a variable you want to visualize that
+exists on multiple mesh blocks. To view the variable on ``all`` mesh blocks,
+simply pass ``all`` as the first argument of the field tuple:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MultiRegion/two_region_example_out.e", step=-1)
+   sl = yt.SlicePlot(ds, 'z', ('all', 'diffused'))
+   sl.save()
+
 
 Plot Customization: Recentering, Resizing, Colormaps, and More
 --------------------------------------------------------------

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -53,17 +53,36 @@
   local_tipsy_002:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_007:
+  local_varia_008:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/fields/tests/test_xray_fields.py
+
+  local_photon_001:
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+  local_unstructured_002:
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
+    - yt/visualization/tests/test_mesh_slices.py:test_multi_region
 
-  local_orion_001:
-    - yt/frontends/boxlib/tests/test_orion.py
+  local_boxlib_002:
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect
+    - yt/frontends/boxlib/tests/test_outputs.py:test_radtube
+    - yt/frontends/boxlib/tests/test_outputs.py:test_star
+    - yt/frontends/boxlib/tests/test_outputs.py:test_OrionDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_units_override
+
+  local_boxlib_particles_001:
+    - yt/frontends/boxlib/tests/test_outputs.py:test_LyA
+    - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_langmuir
+    - yt/frontends/boxlib/tests/test_outputs.py:test_plasma
+    - yt/frontends/boxlib/tests/test_outputs.py:test_warpx_particle_io
+    - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset
+    - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset
 
   local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -125,7 +125,8 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
-    derived_field
+    derived_field, \
+    add_xray_emissivity_field
 
 from yt.data_objects.api import \
     DatasetSeries, ImageArray, \

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -1,18 +1,8 @@
-"""
-API for spectral_integrator
-
-
-
-"""
+from yt.funcs import issue_deprecation_warning
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
+issue_deprecation_warning("The spectral_integrator module is deprecated. "
+                          "'add_xray_emissivity_field' can now be imported "
+                          "from the yt module.")
 
-from .spectral_frequency_integrator import \
-    EmissivityIntegrator, \
+from yt.fields.xray_emission_fields import \
     add_xray_emissivity_field
\ No newline at end of file

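The shim keeps the old import path working while pointing users at the new
location; illustratively:

    # Old location: still importable, but now emits a deprecation warning.
    from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field

    # New location, as advertised by the warning:
    import yt
    yt.add_xray_emissivity_field  # same function, re-exported at top level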
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ /dev/null
@@ -1,279 +0,0 @@
-"""
-Integrator classes to deal with interpolation and integration of input spectral
-bins.  Currently only supports Cloudy and APEC-style data.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.funcs import \
-     download_file, \
-     mylog, \
-     only_on_root
-
-from yt.utilities.exceptions import YTFieldNotFound
-from yt.utilities.exceptions import YTException
-from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, BilinearFieldInterpolator
-from yt.utilities.physical_constants import \
-    hcgs, mp
-from yt.units.yt_array import YTArray, YTQuantity
-from yt.utilities.physical_ratios import \
-    primordial_H_mass_fraction, erg_per_keV
-
-xray_data_version = 1
-
-def _get_data_file(data_file=None):
-    if data_file is None:
-        data_file = "cloudy_emissivity.h5"
-    data_url = "http://yt-project.org/data"
-    if "YT_DEST" in os.environ and \
-      os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
-        data_dir = os.path.join(os.environ["YT_DEST"], "data")
-    else:
-        data_dir = "."
-    data_path = os.path.join(data_dir, data_file)
-    if not os.path.exists(data_path):
-        mylog.info("Attempting to download supplementary data from %s to %s." % 
-                   (data_url, data_dir))
-        fn = download_file(os.path.join(data_url, data_file), data_path)
-        if fn != data_path:
-            raise RuntimeError("Failed to download supplementary data.")
-    return data_path
-
-class EnergyBoundsException(YTException):
-    def __init__(self, lower, upper):
-        self.lower = lower
-        self.upper = upper
-
-    def __str__(self):
-        return "Energy bounds are %e to %e keV." % \
-          (self.lower, self.upper)
-
-class ObsoleteDataException(YTException):
-    def __str__(self):
-        return "X-ray emissivity data is out of date.\n" + \
-               "Download the latest data from http://yt-project.org/data/cloudy_emissivity.h5 and move it to %s." % \
-          os.path.join(os.environ["YT_DEST"], "data", "cloudy_emissivity.h5")
-          
-class EmissivityIntegrator(object):
-    r"""Class for making X-ray emissivity fields with hdf5 data tables 
-    from Cloudy.
-    
-    Initialize an EmissivityIntegrator object.
-
-    Parameters
-    ----------
-    filename: string, default None
-        Path to data file containing emissivity values.  If None,
-        a file called "cloudy_emissivity.h5" is used, for photoionized
-        plasmas. A second option, for collisionally ionized plasmas, is
-        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
-        These files contain emissivity tables for primordial elements and
-        for metals at solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-        
-    """
-    def __init__(self, filename=None):
-
-        default_filename = False
-        if filename is None:
-            filename = _get_data_file()
-            default_filename = True
-
-        if not os.path.exists(filename):
-            mylog.warning("File %s does not exist, will attempt to find it." % filename)
-            filename = _get_data_file(data_file=filename)
-        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
-        in_file = h5py.File(filename, "r")
-        if "info" in in_file.attrs:
-            only_on_root(mylog.info, in_file.attrs["info"])
-        if default_filename and \
-          in_file.attrs["version"] < xray_data_version:
-            raise ObsoleteDataException()
-        else:
-            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
-                         in_file.attrs["version"])
-
-        for field in ["emissivity_primordial", "emissivity_metals",
-                      "log_nH", "log_T", "log_E"]:
-            if field in in_file:
-                setattr(self, field, in_file[field][:])
-        in_file.close()
-
-        E_diff = np.diff(self.log_E)
-        self.E_bins = \
-                  YTArray(np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
-                                                      [self.log_E[-1] - 0.5 * E_diff[-1],
-                                                       self.log_E[-1] + 0.5 * E_diff[-1]]])),
-                          "keV")
-        self.dnu = (np.diff(self.E_bins)/hcgs).in_units("Hz")
-
-    def get_interpolator(self, data, e_min, e_max):
-        e_min = YTQuantity(e_min, "keV")
-        e_max = YTQuantity(e_max, "keV")
-        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
-          (e_max - self.E_bins[-1]) / e_max > 1e-3:
-            raise EnergyBoundsException(self.E_bins[0], self.E_bins[-1])
-        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
-        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
-        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
-
-        my_dnu = self.dnu[e_is: e_ie].copy()
-        # clip edge bins if the requested range is smaller
-        my_dnu[0] -= ((e_min - self.E_bins[e_is])/hcgs).in_units("Hz")
-        my_dnu[-1] -= ((self.E_bins[e_ie] - e_max)/hcgs).in_units("Hz")
-
-        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
-        if len(data.shape) == 2:
-            emiss = UnilinearFieldInterpolator(np.log10(interp_data),
-                                               [self.log_T[0],  self.log_T[-1]],
-                                               "log_T", truncate=True)
-        else:
-            emiss = BilinearFieldInterpolator(np.log10(interp_data),
-                                              [self.log_nH[0], self.log_nH[-1],
-                                               self.log_T[0],  self.log_T[-1]],
-                                              ["log_nH", "log_T"], truncate=True)
-
-        return emiss
-
-def add_xray_emissivity_field(ds, e_min, e_max,
-                              filename=None,
-                              with_metals=True,
-                              constant_metallicity=None):
-    r"""Create X-ray emissivity fields for a given energy range.
-
-    Parameters
-    ----------
-    e_min: float
-        the minimum energy in keV for the energy band.
-    e_min: float
-        the maximum energy in keV for the energy band.
-    filename: string, optional
-        Path to data file containing emissivity values.  If None,
-        a file called "cloudy_emissivity.h5" is used, for photoionized
-        plasmas. A second option, for collisionally ionized plasmas, is
-        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
-        These files contain emissivity tables for primordial elements and
-        for metals at solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-    with_metals: bool, optional
-        If True, use the metallicity field to add the contribution from 
-        metals.  If False, only the emission from H/He is considered.
-        Default: True.
-    constant_metallicity: float, optional
-        If specified, assume a constant metallicity for the emission 
-        from metals.  The *with_metals* keyword must be set to False 
-        to use this. It should be given in unit of solar metallicity.
-        Default: None.
-
-    This will create three fields:
-
-    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
-    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
-    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
-
-    Examples
-    --------
-
-    >>> from yt.mods import *
-    >>> from yt.analysis_modules.spectral_integrator.api import *
-    >>> ds = load(dataset)
-    >>> add_xray_emissivity_field(ds, 0.5, 2)
-    >>> p = ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
-    >>> p.save()
-
-    """
-
-    if with_metals:
-        try:
-            ds._get_field_info("metal_density")
-        except YTFieldNotFound:
-            raise RuntimeError("Your dataset does not have a \"metal_density\" field! " +
-                               "Perhaps you should specify a constant metallicity?")
-
-    my_si = EmissivityIntegrator(filename=filename)
-
-    em_0 = my_si.get_interpolator(my_si.emissivity_primordial, e_min, e_max)
-    em_Z = None
-    if with_metals or constant_metallicity is not None:
-        em_Z = my_si.get_interpolator(my_si.emissivity_metals, e_min, e_max)
-
-    energy_erg = np.power(10, my_si.log_E) * erg_per_keV
-    emp_0 = my_si.get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
-                                   e_min, e_max)
-    emp_Z = None
-    if with_metals or constant_metallicity is not None:
-        emp_Z = my_si.get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
-                                       e_min, e_max)
-
-    try:
-        ds._get_field_info("H_number_density")
-    except YTFieldNotFound:
-        mylog.warning("Could not find a field for \"H_number_density\". Assuming primordial H " +
-                      "mass fraction.")
-        def _nh(field, data):
-            return primordial_H_mass_fraction*data["gas","density"]/mp
-        ds.add_field(("gas", "H_number_density"), function=_nh, units="cm**-3")
-
-    def _emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
-              "log_T"   : np.log10(data["gas","temperature"])}
-
-        my_emissivity = np.power(10, em_0(dd))
-        if em_Z is not None:
-            if with_metals:
-                my_Z = data["gas","metallicity"]
-            elif constant_metallicity is not None:
-                my_Z = constant_metallicity
-            my_emissivity += my_Z * np.power(10, em_Z(dd))
-
-        return data["gas","H_number_density"]**2 * \
-            YTArray(my_emissivity, "erg*cm**3/s")
-
-    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="erg/cm**3/s")
-
-    def _luminosity_field(field, data):
-        return data[emiss_name] * data["cell_volume"]
-
-    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="erg/s")
-
-    def _photon_emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
-              "log_T"   : np.log10(data["gas","temperature"])}
-
-        my_emissivity = np.power(10, emp_0(dd))
-        if emp_Z is not None:
-            if with_metals:
-                my_Z = data["gas","metallicity"]
-            elif constant_metallicity is not None:
-                my_Z = constant_metallicity
-            my_emissivity += my_Z * np.power(10, emp_Z(dd))
-
-        return data["gas","H_number_density"]**2 * \
-            YTArray(my_emissivity, "photons*cm**3/s")
-
-    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="photons/cm**3/s")
-
-    return emiss_name, lum_name, phot_name

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -666,13 +666,17 @@
                          for field in fields]
         domain_dims = self.ds.domain_dimensions.astype("int64") \
                     * self.ds.relative_refinement(0, self.level)
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             # NOTE: This usage of "refine_by" is actually *okay*, because it's
             # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
-                        domain_dims, self.ds.refine_by)
+                        domain_dims, refine_by)
         for name, v in zip(fields, output_fields):
             fi = self.ds._get_field_info(*name)
             self[name] = self.ds.arr(v, fi.units)
@@ -940,6 +944,12 @@
         if len(fields) == 0: return
         ls = self._initialize_level_state(fields)
         min_level = self._compute_minimum_level()
+        # NOTE: This usage of "refine_by" is actually *okay*, because it's
+        # being used with respect to iref, which is *already* scaled!
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for level in range(self.level + 1):
             if level < min_level:
                 self._update_level_state(ls)
@@ -954,11 +964,9 @@
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
-                # NOTE: This usage of "refine_by" is actually *okay*, because it's
-                # being used with respect to iref, which is *already* scaled!
                 tot -= fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
-                            chunk.ires, domain_dims, self.ds.refine_by)
+                            chunk.ires, domain_dims, refine_by)
             if level == 0 and tot != 0:
                 raise RuntimeError
             self._update_level_state(ls)

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -190,7 +190,7 @@
                          dtype="float64")
         # We should not need the following if we know in advance all our fields
         # need no casting.
-        fields = [np.asarray(f, dtype="float64") for f in fields]
+        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
         op.process_octree(self.oct_handler, self.domain_ind, pos, fields,
             self.domain_id, self._domain_offset)
         vals = op.finalize()

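The np.asarray -> np.ascontiguousarray swap matters because np.asarray will
happily return a strided view when the dtype already matches, while the
octree deposit routine presumably needs C-contiguous buffers. A minimal
numpy demonstration of the difference:

    import numpy as np

    a = np.arange(10.0)[::2]                      # a non-contiguous view
    b = np.asarray(a, dtype="float64")            # same strided view back
    c = np.ascontiguousarray(a, dtype="float64")  # forces a contiguous copy

    print(b.flags["C_CONTIGUOUS"], c.flags["C_CONTIGUOUS"])  # False True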
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/particle_unions.py
--- a/yt/data_objects/particle_unions.py
+++ b/yt/data_objects/particle_unions.py
@@ -15,13 +15,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import ensure_list
+from .unions import Union
 
-class ParticleUnion(object):
+class ParticleUnion(Union):
     def __init__(self, name, sub_types):
-        self.name = name
-        self.sub_types = ensure_list(sub_types)
-
-    def __iter__(self):
-        for st in self.sub_types:
-            yield st
+        super(ParticleUnion, self).__init__(name, sub_types)

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,7 +717,7 @@
             cname = cls.__name__
             if cname.endswith("Base"): cname = cname[:-4]
             self._add_object_class(name, cls)
-        if self.refine_by != 2 and hasattr(self, 'proj') and \
+        if not np.all(self.refine_by == 2) and hasattr(self, 'proj') and \
             hasattr(self, 'overlap_proj'):
             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")

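The np.all form works whether refine_by is the traditional scalar or the new
per-dimension array; a quick check:

    import numpy as np

    print(np.all(2 == 2))                     # True  (scalar refine_by)
    print(np.all(np.array([2, 2, 2]) == 2))   # True  (isotropic array)
    print(np.all(np.array([5, 1, 1]) == 2))   # False (anisotropic refinement)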
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/tests/test_refinement.py
--- /dev/null
+++ b/yt/data_objects/tests/test_refinement.py
@@ -0,0 +1,49 @@
+from yt.testing import \
+    assert_array_equal, \
+    assert_equal
+import yt
+import numpy as np
+
+def setup_fake_refby():
+    refine_by=np.array([5, 1, 1])
+    top_grid_dim = [100,  10, 2]
+    n1=100
+    n2=10
+    n3=2
+
+    grid_data = [
+        dict(left_edge = [0.0, 0.0, 0.0],
+             right_edge = [1.0, np.pi, np.pi*2.],
+             level = 0,
+             dimensions = np.array([n1, n2, n3])),
+        dict(left_edge = [0., 0., 0.],
+             right_edge = [0.5, np.pi, np.pi*2.],
+             level = 1,
+             dimensions = refine_by*[n1/2.0, n2, n3]),
+    ]
+
+    for g in grid_data:
+        g["density"] = (np.random.random(g["dimensions"].astype("i8")),
+                        "g/cm**3")
+    bbox = np.array([[0.0, 1.0], [0.0, np.pi], [0.0, np.pi*2]])
+
+    ds = yt.load_amr_grids(grid_data, top_grid_dim,
+                           bbox = bbox, geometry='spherical',
+                           refine_by=refine_by, length_unit='kpc')
+    return ds
+
+def test_refine_by():
+    ds = setup_fake_refby()
+    dd = ds.all_data()
+    # This checks that we always refine_by 1 in dimensions 2 and 3
+    dims = ds.domain_dimensions*ds.refine_by**ds.max_level
+    for i in range(1, 3):
+        # Check the refine_by == 1
+        ncoords = np.unique(dd.icoords[:,i]).size
+        assert_equal(ncoords, dims[i])
+    for g in ds.index.grids:
+        dims = ds.domain_dimensions*ds.refine_by**g.Level
+        # Now we can check converting back to the reference space
+        v = ((g.icoords + 1) / dims.astype("f8")).max(axis=0)
+        v *= ds.domain_width
+        assert_array_equal(v, g.RightEdge.d)

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/data_objects/unions.py
--- /dev/null
+++ b/yt/data_objects/unions.py
@@ -0,0 +1,31 @@
+"""
+Union structures which can be used to form unions of particles, meshes,
+etc. Union is the base class from which trivial named union classes
+can be derived
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import ensure_list
+
+class Union(object):
+    def __init__(self, name, sub_types):
+        self.name = name
+        self.sub_types = ensure_list(sub_types)
+
+    def __iter__(self):
+        for st in self.sub_types:
+            yield st
+
+class MeshUnion(Union):
+    def __init__(self, name, sub_types):
+        super(MeshUnion, self).__init__(name, sub_types)

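Since Union is just a named iterable of sub-types, its behavior is easy to
show in isolation:

    from yt.data_objects.unions import MeshUnion

    u = MeshUnion("all", ["connect1", "connect2"])
    for st in u:
        print(u.name, st)   # -> "all connect1", then "all connect2"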
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/fields/api.py
--- a/yt/fields/api.py
+++ b/yt/fields/api.py
@@ -43,3 +43,5 @@
     FieldDetector
 from .field_info_container import \
     FieldInfoContainer
+from .xray_emission_fields import \
+    add_xray_emissivity_field
\ No newline at end of file

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/fields/tests/test_xray_fields.py
--- /dev/null
+++ b/yt/fields/tests/test_xray_fields.py
@@ -0,0 +1,40 @@
+from yt.fields.xray_emission_fields import \
+    add_xray_emissivity_field
+from yt.utilities.answer_testing.framework import \
+    requires_ds, can_run_ds, data_dir_load, \
+    ProjectionValuesTest, FieldValuesTest
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def check_xray_fields(ds_fn, fields):
+    if not can_run_ds(ds_fn): return
+    dso = [ None, ("sphere", ("m", (0.1, 'unitary')))]
+    for field in fields:
+        for axis in [0, 1, 2]:
+            for dobj_name in dso:
+                yield ProjectionValuesTest(ds_fn, axis, field, 
+                                           None, dobj_name)
+                yield FieldValuesTest(ds_fn, field, dobj_name)
+
+sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+@requires_ds(sloshing, big_data=True)
+def test_sloshing_apec():
+    ds = data_dir_load(sloshing)
+    fields = add_xray_emissivity_field(ds, 0.5, 7.0, table_type="apec", 
+                                       metallicity=0.3)
+    for test in check_xray_fields(ds, fields):
+        test_sloshing_apec.__name__ = test.description
+        yield test
+
+d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d"
+@requires_ds(d9p, big_data=True)
+def test_d9p_cloudy():
+    ds = data_dir_load(d9p)
+    fields = add_xray_emissivity_field(ds, 0.5, 2.0, redshift=ds.current_redshift,
+                                       table_type="cloudy", cosmology=ds.cosmology,
+                                       metallicity=("gas", "metallicity"))
+    for test in check_xray_fields(ds, fields):
+        test_d9p_cloudy.__name__ = test.description
+        yield test

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/fields/xray_emission_fields.py
--- /dev/null
+++ b/yt/fields/xray_emission_fields.py
@@ -0,0 +1,304 @@
+"""
+Integrator classes to deal with interpolation and integration of input spectral
+bins.  Currently only supports Cloudy and APEC-style data.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.on_demand_imports import _h5py as h5py
+import numpy as np
+import os
+
+from yt.config import ytcfg
+from yt.fields.derived_field import DerivedField
+from yt.funcs import mylog, only_on_root, issue_deprecation_warning
+from yt.utilities.exceptions import YTFieldNotFound
+from yt.utilities.exceptions import YTException
+from yt.utilities.linear_interpolators import \
+    UnilinearFieldInterpolator, BilinearFieldInterpolator
+from yt.units.yt_array import YTArray, YTQuantity
+from yt.utilities.cosmology import Cosmology
+
+data_version = {"cloudy": 2,
+                "apec": 2}
+
+data_url = "http://yt-project.org/data"
+
+def _get_data_file(table_type, data_dir=None):
+    data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
+    if data_dir is None:
+        supp_data_dir = ytcfg.get("yt", "supp_data_dir")
+        data_dir = supp_data_dir if os.path.exists(supp_data_dir) else "."
+    data_path = os.path.join(data_dir, data_file)
+    if not os.path.exists(data_path):
+        msg = "Failed to find emissivity data file %s! " % data_file + \
+            "Please download from http://yt-project.org/data!"
+        mylog.error(msg)
+        raise IOError(msg)
+    return data_path
+
+class EnergyBoundsException(YTException):
+    def __init__(self, lower, upper):
+        self.lower = lower
+        self.upper = upper
+
+    def __str__(self):
+        return "Energy bounds are %e to %e keV." % \
+          (self.lower, self.upper)
+
+class ObsoleteDataException(YTException):
+    def __init__(self, table_type):
+        data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type])
+        self.msg = "X-ray emissivity data is out of date.\n"
+        self.msg += "Download the latest data from %s/%s." % (data_url, data_file)
+
+    def __str__(self):
+        return self.msg
+
+class XrayEmissivityIntegrator(object):
+    r"""Class for making X-ray emissivity fields. Uses hdf5 data tables
+    generated from Cloudy and AtomDB/APEC.
+
+    Initialize an XrayEmissivityIntegrator object.
+
+    Parameters
+    ----------
+    table_type: string
+        The type of data to use when computing the emissivity values. If "cloudy",
+        a file called "cloudy_emissivity.h5" is used, for photoionized
+        plasmas. If "apec", a file called "apec_emissivity.h5" is used for
+        collisionally ionized plasmas. These files contain emissivity tables 
+        for primordial elements and for metals at solar metallicity for the 
+        energy range 0.1 to 100 keV.
+    redshift : float, optional
+        The cosmological redshift of the source of the field. Default: 0.0.
+    data_dir : string, optional
+        The location to look for the data table in. If not supplied, the file
+        will be looked for in the location given by the "supp_data_dir"
+        yt configuration value or in the current working directory.
+    use_metals : boolean, optional
+        If set to True, the emissivity will include contributions from metals.
+        Default: True
+    """
+    def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True):
+
+        filename = _get_data_file(table_type, data_dir=data_dir)
+        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
+        in_file = h5py.File(filename, "r")
+        if "info" in in_file.attrs:
+            only_on_root(mylog.info, in_file.attrs["info"].decode('utf8'))
+        if in_file.attrs["version"] != data_version[table_type]:
+            raise ObsoleteDataException(table_type)
+        else:
+            only_on_root(mylog.info, "X-ray '%s' emissivity data version: %s." % \
+                         (table_type, in_file.attrs["version"]))
+
+        self.log_T = in_file["log_T"][:]
+        self.emissivity_primordial = in_file["emissivity_primordial"][:]
+        if "log_nH" in in_file:
+            self.log_nH = in_file["log_nH"][:]
+        if use_metals:
+            self.emissivity_metals = in_file["emissivity_metals"][:]
+        self.ebin = YTArray(in_file["E"], "keV")
+        in_file.close()
+        self.dE = np.diff(self.ebin)
+        self.emid = 0.5*(self.ebin[1:]+self.ebin[:-1]).to("erg")
+        self.redshift = redshift
+
+    def get_interpolator(self, data_type, e_min, e_max, energy=True):
+        data = getattr(self, "emissivity_%s" % data_type)
+        if not energy:
+            data = data[..., :] / self.emid.v
+        e_min = YTQuantity(e_min, "keV")*(1.0+self.redshift)
+        e_max = YTQuantity(e_max, "keV")*(1.0+self.redshift)
+        if (e_min - self.ebin[0]) / e_min < -1e-3 or \
+          (e_max - self.ebin[-1]) / e_max > 1e-3:
+            raise EnergyBoundsException(self.ebin[0], self.ebin[-1])
+        e_is, e_ie = np.digitize([e_min, e_max], self.ebin)
+        e_is = np.clip(e_is - 1, 0, self.ebin.size - 1)
+        e_ie = np.clip(e_ie, 0, self.ebin.size - 1)
+
+        my_dE = self.dE[e_is: e_ie].copy()
+        # clip edge bins if the requested range is smaller
+        my_dE[0] -= e_min - self.ebin[e_is]
+        my_dE[-1] -= self.ebin[e_ie] - e_max
+
+        interp_data = (data[..., e_is:e_ie]*my_dE).sum(axis=-1)
+        if data.ndim == 2:
+            emiss = UnilinearFieldInterpolator(np.log10(interp_data),
+                                               [self.log_T[0],  self.log_T[-1]],
+                                               "log_T", truncate=True)
+        else:
+            emiss = BilinearFieldInterpolator(np.log10(interp_data),
+                                              [self.log_nH[0], self.log_nH[-1],
+                                               self.log_T[0],  self.log_T[-1]],
+                                              ["log_nH", "log_T"], truncate=True)
+
+        return emiss
+
+def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0,
+                              metallicity=("gas", "metallicity"), 
+                              table_type="cloudy", data_dir=None,
+                              cosmology=None, **kwargs):
+    r"""Create X-ray emissivity fields for a given energy range.
+
+    Parameters
+    ----------
+    e_min : float
+        The minimum energy in keV for the energy band.
+    e_max : float
+        The maximum energy in keV for the energy band.
+    redshift : float, optional
+        The cosmological redshift of the source of the field. Default: 0.0.
+    metallicity : field or float, optional
+        Either the name of a metallicity field or a single floating-point
+        number specifying a spatially constant metallicity. Must be in
+        solar units. If set to None, no metals will be assumed. Default: 
+        ("gas", "metallicity")
+    table_type : string, optional
+        The type of emissivity table to be used when creating the fields. 
+        Options are "cloudy" or "apec". Default: "cloudy"
+    data_dir : string, optional
+        The location to look for the data table in. If not supplied, the file
+        is searched for in the directory specified by the YT_DEST environment
+        variable or, failing that, in the current working directory.
+    cosmology : :class:`~yt.utilities.cosmology.Cosmology`, optional
+        If set and redshift > 0.0, this cosmology will be used when computing the
+        cosmological dependence of the emission fields. If not set, yt's default
+        LCDM cosmology will be used.
+
+    This will create three fields:
+
+    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
+    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
+    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
+
+    and, if redshift > 0.0, two additional intensity fields:
+
+    "xray_intensity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3 arcsec^-2)
+    "xray_photon_intensity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3 arcsec^-2)
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
+    >>> yt.add_xray_emissivity_field(ds, 0.5, 2)
+    >>> p = yt.ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
+    >>> p.save()
+    """
+    # The next several if constructs are for backwards-compatibility
+    if "constant_metallicity" in kwargs:
+        issue_deprecation_warning("The \"constant_metallicity\" parameter is deprecated. Set "
+                                  "the \"metallicity\" parameter to a constant float value instead.")
+        metallicity = kwargs["constant_metallicity"]
+    if "with_metals" in kwargs:
+        issue_deprecation_warning("The \"with_metals\" parameter is deprecated. Use the "
+                                  "\"metallicity\" parameter to choose a constant or "
+                                  "spatially varying metallicity.")
+        if kwargs["with_metals"] and isinstance(metallicity, float):
+            raise RuntimeError("\"with_metals=True\", but you specified a constant metallicity!")
+        if not kwargs["with_metals"] and not isinstance(metallicity, float):
+            raise RuntimeError("\"with_metals=False\", but you didn't specify a constant metallicity!")
+    if not isinstance(metallicity, float) and metallicity is not None:
+        try:
+            metallicity = ds._get_field_info(*metallicity)
+        except YTFieldNotFound:
+            raise RuntimeError("Your dataset does not have a {} field! ".format(metallicity) +
+                               "Perhaps you should specify a constant metallicity instead?")
+
+    my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift)
+
+    em_0 = my_si.get_interpolator("primordial", e_min, e_max)
+    emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False)
+    if metallicity is not None:
+        em_Z = my_si.get_interpolator("metals", e_min, e_max)
+        emp_Z = my_si.get_interpolator("metals", e_min, e_max, energy=False)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]),
+              "log_T": np.log10(data["gas", "temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if metallicity is not None:
+            if isinstance(metallicity, DerivedField):
+                my_Z = data[metallicity.name]
+            else:
+                my_Z = metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["gas","H_nuclei_density"]**2 * \
+            YTArray(my_emissivity, "erg*cm**3/s")
+
+    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(("gas", emiss_name), function=_emissivity_field,
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
+                 sampling_type="cell", units="erg/cm**3/s")
+
+    def _luminosity_field(field, data):
+        return data[emiss_name] * data["cell_volume"]
+
+    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(("gas", lum_name), function=_luminosity_field,
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
+                 sampling_type="cell", units="erg/s")
+
+    def _photon_emissivity_field(field, data):
+        dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]),
+              "log_T": np.log10(data["gas", "temperature"])}
+
+        my_emissivity = np.power(10, emp_0(dd))
+        if metallicity is not None:
+            if isinstance(metallicity, DerivedField):
+                my_Z = data[metallicity.name]
+            else:
+                my_Z = metallicity
+            my_emissivity += my_Z * np.power(10, emp_Z(dd))
+
+        return data["gas", "H_nuclei_density"]**2 * \
+            YTArray(my_emissivity, "photons*cm**3/s")
+
+    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
+    ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
+                 sampling_type="cell", units="photons/cm**3/s")
+
+    fields = [emiss_name, lum_name, phot_name]
+
+    if redshift > 0.0:
+
+        if cosmology is None:
+            if hasattr(ds, "cosmology"):
+                cosmology = ds.cosmology
+            else:
+                cosmology = Cosmology()
+
+        D_L = cosmology.luminosity_distance(0.0, redshift)
+        angular_scale = 1.0/cosmology.angular_scale(0.0, redshift)
+        dist_fac = 1.0/(4.0*np.pi*D_L*D_L*angular_scale*angular_scale)
+
+        ei_name = "xray_intensity_%s_%s_keV" % (e_min, e_max)
+        def _intensity_field(field, data):
+            I = dist_fac*data[emiss_name]
+            return I.in_units("erg/cm**3/s/arcsec**2")
+        ds.add_field(("gas", ei_name), function=_intensity_field,
+                     display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
+                     sampling_type="cell", units="erg/cm**3/s/arcsec**2")
+
+        i_name = "xray_photon_intensity_%s_%s_keV" % (e_min, e_max)
+        def _photon_intensity_field(field, data):
+            I = (1.0+redshift)*dist_fac*data[phot_name]
+            return I.in_units("photons/cm**3/s/arcsec**2")
+        ds.add_field(("gas", i_name), function=_photon_intensity_field,
+                     display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max),
+                     sampling_type="cell", units="photons/cm**3/s/arcsec**2")
+
+        fields += [ei_name, i_name]
+
+    for field in fields:
+        mylog.info("Adding %s field." % field)
+
+    return fields

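A note on the band-integration bookkeeping in get_interpolator above:
np.digitize locates the table bins that bracket the (redshifted) e_min and
e_max, np.clip keeps the indices in range, and the first and last entries of
my_dE are trimmed so that partial edge bins contribute only the overlapping
energy. A minimal numpy sketch of the same logic, using a made-up five-bin
grid rather than any real emissivity table:

>>> import numpy as np
>>> ebin = np.array([0.1, 0.5, 1.0, 2.0, 5.0, 10.0])  # hypothetical edges, keV
>>> dE = np.diff(ebin)
>>> e_min, e_max = 0.7, 4.0
>>> e_is, e_ie = np.digitize([e_min, e_max], ebin)
>>> e_is = np.clip(e_is - 1, 0, ebin.size - 1)
>>> e_ie = np.clip(e_ie, 0, ebin.size - 1)
>>> my_dE = dE[e_is:e_ie].copy()
>>> my_dE[0] -= e_min - ebin[e_is]   # trim the low edge bin
>>> my_dE[-1] -= ebin[e_ie] - e_max  # trim the high edge bin
>>> my_dE
array([0.3, 1. , 2. ])

Weighting data[..., e_is:e_ie] by my_dE and summing over the last axis then
integrates the tabulated emissivity over exactly the requested band.
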
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -20,12 +20,18 @@
       OrionHierarchy, \
       OrionDataset, \
       CastroDataset, \
-      MaestroDataset
+      MaestroDataset, \
+      NyxDataset, \
+      NyxHierarchy, \
+      WarpXDataset, \
+      WarpXHierarchy
 
 from .fields import \
       BoxlibFieldInfo, \
       MaestroFieldInfo, \
-      CastroFieldInfo
+      CastroFieldInfo, \
+      NyxFieldInfo, \
+      WarpXFieldInfo
 
 from .io import \
       IOHandlerBoxlib

diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -16,19 +16,21 @@
 import inspect
 import os
 import re
+from collections import namedtuple
 
 from stat import ST_CTIME
 
 import numpy as np
+import glob
 
 from yt.funcs import \
     ensure_tuple, \
     mylog, \
     setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
-from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
+from yt.units import YTQuantity
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
@@ -39,9 +41,10 @@
 
 from .fields import \
     BoxlibFieldInfo, \
+    NyxFieldInfo, \
     MaestroFieldInfo, \
-    CastroFieldInfo
-
+    CastroFieldInfo, \
+    WarpXFieldInfo
 
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
@@ -74,6 +77,7 @@
         self._base_offset = offset
         self._parent_id = []
         self._children_ids = []
+        self._pdata = {}
 
     def _prepare_grid(self):
         super(BoxlibGrid, self)._prepare_grid()
@@ -142,6 +146,7 @@
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
         self.directory = ds.output_dir
+        self.particle_headers = {}
 
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
@@ -360,6 +365,32 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+    def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
+
+        self.particle_headers[directory_name] = BoxLibParticleHeader(self.ds,
+                                                                     directory_name,
+                                                                     is_checkpoint,
+                                                                     extra_field_names)
+
+        base_particle_fn = self.ds.output_dir + '/' + directory_name + "/Level_%d/DATA_%.4d"
+
+        gid = 0
+        for lev, data in self.particle_headers[directory_name].data_map.items():
+            for pdf in data.values():
+                pdict = self.grids[gid]._pdata
+                pdict[directory_name] = {}
+                pdict[directory_name]["particle_filename"] = base_particle_fn % \
+                                                             (lev, pdf.file_number)
+                pdict[directory_name]["offset"] = pdf.offset
+                pdict[directory_name]["NumberOfParticles"] = pdf.num_particles
+                self.grid_particle_count[gid] += pdf.num_particles
+                self.grids[gid].NumberOfParticles += pdf.num_particles
+                gid += 1
+
+        # add particle fields to field_list
+        pfield_list = self.particle_headers[directory_name].known_fields
+        self.field_list.extend(pfield_list)
+
 
 class BoxlibDataset(Dataset):
     """
@@ -690,7 +721,7 @@
 
     def _read_particles(self):
         """
-        reads in particles and assigns them to grids. Will search for
+        Reads in particles and assigns them to grids. Will search for
         Star particles, then sink particles if no star particle file
         is found, and finally will simply note that no particles are
         found if neither works. To add a new Orion particle type,
@@ -778,6 +809,9 @@
         if os.path.exists(jobinfo_filename):
             return False
         # Now we check for all the others
+        warpx_jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if os.path.exists(warpx_jobinfo_filename):
+            return False
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -910,47 +944,23 @@
 
     def __init__(self, ds, dataset_type='nyx_native'):
         super(NyxHierarchy, self).__init__(ds, dataset_type)
-        self._read_particle_header()
-
-    def _read_particle_header(self):
-        if not self.ds.parameters["particles"]:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
-                  ['particle_mass'] +  \
-                  ['particle_velocity_%s' % ax for ax in 'xyz']:
-            self.field_list.append(("io", fn))
-        header = open(os.path.join(self.ds.output_dir, "DM", "Header"))
-        version = header.readline()  # NOQA
-        ndim = header.readline()  # NOQA
-        nfields = header.readline()  # NOQA
-        ntotalpart = int(header.readline())  # NOQA
-        nextid = header.readline()  # NOQA
-        maxlevel = int(header.readline())  # NOQA
 
-        # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):
-            header.readline()
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        nyx_extra_real_fields = ['particle_mass',
+                                 'particle_velocity_x',
+                                 'particle_velocity_y',
+                                 'particle_velocity_z']
 
-        grid_info = np.fromiter((int(i) for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        # we need grid_info in `populate_grid_objects`, so save it to self
+        is_checkpoint = False
 
-        for g, pg in izip(self.grids, grid_info):
-            g.particle_filename = os.path.join(self.ds.output_dir, "DM",
-                                               "Level_%s" % (g.Level),
-                                               "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:, 0] = grid_info[:, 1]
+        self._read_particles("DM", is_checkpoint, nyx_extra_real_fields)
 
 
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
+    _field_info_class = NyxFieldInfo
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
@@ -1013,7 +1023,7 @@
         if os.path.isdir(os.path.join(self.output_dir, "DM")):
             # we have particles
             self.parameters["particles"] = 1 
-            self.particle_types = ("io",)
+            self.particle_types = ("DM",)
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
@@ -1044,3 +1054,219 @@
     if len(vals) == 1:
         vals = vals[0]
     return vals
+
+
+class BoxLibParticleHeader(object):
+
+    def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None):
+
+        self.species_name = directory_name
+        header_filename =  ds.output_dir + "/" + directory_name + "/Header"
+        with open(header_filename, "r") as f:
+            self.version_string = f.readline().strip()
+
+            particle_real_type = self.version_string.split('_')[-1]
+            if particle_real_type == 'double':
+                self.real_type = np.float64
+            elif particle_real_type == 'single':
+                self.real_type = np.float32
+            else:
+                raise RuntimeError("yt did not recognize particle real type.")
+            self.int_type = np.int32
+
+            self.dim = int(f.readline().strip())
+            self.num_int_base = 2 + self.dim
+            self.num_real_base = self.dim
+            self.num_int_extra = 0  # this should be written by Boxlib, but isn't
+            self.num_real_extra = int(f.readline().strip())
+            self.num_int = self.num_int_base + self.num_int_extra
+            self.num_real = self.num_real_base + self.num_real_extra            
+            self.num_particles = int(f.readline().strip())
+            self.max_next_id = int(f.readline().strip())
+            self.finest_level = int(f.readline().strip())
+            self.num_levels = self.finest_level + 1
+
+            # Boxlib particles can be written in checkpoint or plotfile mode.
+            # The base integer fields are only there for checkpoints, but some
+            # codes use the checkpoint format for plotting.
+            if not is_checkpoint:
+                self.num_int_base = 0
+                self.num_int_extra = 0
+                self.num_int = 0
+
+            self.grids_per_level = np.zeros(self.num_levels, dtype='int64')
+            self.data_map = {}
+            for level_num in range(self.num_levels):
+                self.grids_per_level[level_num] = int(f.readline().strip())
+                self.data_map[level_num] = {}
+            
+            pfd = namedtuple("ParticleFileDescriptor",
+                             ["file_number", "num_particles", "offset"])
+
+            for level_num in range(self.num_levels):
+                for grid_num in range(self.grids_per_level[level_num]):
+                    entry = [int(val) for val in f.readline().strip().split()]
+                    self.data_map[level_num][grid_num] = pfd(*entry)
+
+        self._generate_particle_fields(extra_field_names)
+
+    def _generate_particle_fields(self, extra_field_names):
+
+        # these are the 'base' integer fields
+        self.known_int_fields = [(self.species_name, "particle_id"),
+                                 (self.species_name, "particle_cpu"),
+                                 (self.species_name, "particle_cell_x"),
+                                 (self.species_name, "particle_cell_y"),
+                                 (self.species_name, "particle_cell_z")]
+        self.known_int_fields = self.known_int_fields[0:self.num_int_base]
+
+        # these are extra integer fields
+        extra_int_fields = ["particle_int_comp%d" % i 
+                            for i in range(self.num_int_extra)]
+        self.known_int_fields.extend([(self.species_name, field) 
+                                      for field in extra_int_fields])
+
+        # these are the base real fields
+        self.known_real_fields = [(self.species_name, "particle_position_x"),
+                                  (self.species_name, "particle_position_y"),
+                                  (self.species_name, "particle_position_z")]
+        self.known_real_fields = self.known_real_fields[0:self.num_real_base]
+
+        # these are the extras
+        if extra_field_names is not None:
+            assert(len(extra_field_names) == self.num_real_extra)
+        else:
+            extra_field_names = ["particle_real_comp%d" % i 
+                                 for i in range(self.num_real_extra)]
+
+        self.known_real_fields.extend([(self.species_name, field) 
+                                       for field in extra_field_names])
+
+        self.known_fields = self.known_int_fields + self.known_real_fields
+
+        self.particle_int_dtype = np.dtype([(t[1], self.int_type) 
+                                            for t in self.known_int_fields])
+
+        self.particle_real_dtype = np.dtype([(t[1], self.real_type) 
+                                            for t in self.known_real_fields])
+
+
+class WarpXHierarchy(BoxlibHierarchy):
+
+    def __init__(self, ds, dataset_type="boxlib_native"):
+        super(WarpXHierarchy, self).__init__(ds, dataset_type)
+
+        # extra beyond the base real fields that all Boxlib
+        # particles have, i.e. the xyz positions
+        warpx_extra_real_fields = ['particle_weight',
+                                   'particle_velocity_x',
+                                   'particle_velocity_y',
+                                   'particle_velocity_z']
+
+        is_checkpoint = False
+
+        for ptype in self.ds.particle_types:
+            self._read_particles(ptype, is_checkpoint, warpx_extra_real_fields)
+        
+        # Additional WarpX particle information (used to set up species)
+        with open(self.ds.output_dir + "/WarpXHeader", 'r') as f:
+
+            # skip to the end, where species info is written out
+            line = f.readline()
+            while line and line != ')\n':
+                line = f.readline()
+            line = f.readline()
+
+            # Read in the species information
+            species_id = 0
+            while line:
+                line = line.strip().split()
+                charge = YTQuantity(float(line[0]), "C")
+                mass = YTQuantity(float(line[1]), "kg")
+                charge_name = 'particle%.1d_charge' % species_id
+                mass_name = 'particle%.1d_mass' % species_id
+                self.parameters[charge_name] = charge
+                self.parameters[mass_name] = mass
+                line = f.readline()
+                species_id += 1
+    
+def _skip_line(line):
+    if len(line) == 0:
+        return True
+    if line[0] == '\n':
+        return True
+    if line[0] == "=":
+        return True
+    if line[0] == ' ':
+        return True
+    return False
+
+
+class WarpXDataset(BoxlibDataset):
+
+    _index_class = WarpXHierarchy
+    _field_info_class = WarpXFieldInfo
+
+    def __init__(self, output_dir,
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
+                 dataset_type='boxlib_native',
+                 storage_filename=None,
+                 units_override=None,
+                 unit_system="mks"):
+
+        super(WarpXDataset, self).__init__(output_dir,
+                                           cparam_filename,
+                                           fparam_filename,
+                                           dataset_type,
+                                           storage_filename,
+                                           units_override,
+                                           unit_system)
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "warpx_job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        if os.path.exists(jobinfo_filename):
+            return True
+        return False
+
+    def _parse_parameter_file(self):
+        super(WarpXDataset, self)._parse_parameter_file()
+        jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info")
+        with open(jobinfo_filename, "r") as f:
+            for line in f.readlines():
+                if _skip_line(line):
+                    continue
+                l = line.strip().split(":")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                l = line.strip().split("=")
+                if len(l) == 2:
+                    self.parameters[l[0].strip()] = l[1].strip()
+                
+        # set the periodicity based on the integer BC runtime parameters
+        is_periodic = self.parameters['geometry.is_periodic'].split()
+        periodicity = [bool(int(val)) for val in is_periodic]
+        self.periodicity = ensure_tuple(periodicity)
+
+        species_list = []
+        species_dirs = glob.glob(self.output_dir + "/particle*")
+        for species in species_dirs:
+            species_list.append(species[len(self.output_dir)+1:])
+        self.particle_types = tuple(species_list)
+        self.particle_types_raw = self.particle_types
+
+
+    def _set_code_unit_attributes(self):
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "m"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "kg"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "m/s"))

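Each per-grid line in a Boxlib particle Header is a (file number, particle
count, byte offset) triple, which BoxLibParticleHeader above stores as a
ParticleFileDescriptor. A small self-contained illustration, with a made-up
header entry:

>>> from collections import namedtuple
>>> pfd = namedtuple("ParticleFileDescriptor",
...                  ["file_number", "num_particles", "offset"])
>>> entry = [int(val) for val in "3 1024 8192".split()]  # hypothetical line
>>> pfd(*entry)
ParticleFileDescriptor(file_number=3, num_particles=1024, offset=8192)

BoxlibHierarchy._read_particles then expands file_number into a path of the
form Level_%d/DATA_%.4d and hands the offset and count to the I/O handler.
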
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -51,6 +51,66 @@
     return tr
 
 
+class WarpXFieldInfo(FieldInfoContainer):
+
+    known_other_fields = (
+        ("Bx", ("T", ["magnetic_field_x"], None)),
+        ("By", ("T", ["magnetic_field_y"], None)),
+        ("Bz", ("T", ["magnetic_field_z"], None)),
+        ("Ex", ("V/m", ["electric_field_x"], None)),
+        ("Ey", ("V/m", ["electric_field_y"], None)),
+        ("Ex", ("V/m", ["electric_field_z"], None)),
+        ("jx", ("A", ["current_x"], None)),
+        ("jy", ("A", ["current_y"], None)),
+        ("jz", ("A", ["current_z"], None)),
+    )
+
+    known_particle_fields = (
+        ("particle_weight", ("", [], None)),
+        ("particle_position_x", ("m", [], None)),
+        ("particle_position_y", ("m", [], None)),
+        ("particle_position_z", ("m", [], None)),
+        ("particle_velocity_x", ("m/s", [], None)),
+        ("particle_velocity_y", ("m/s", [], None)),
+        ("particle_velocity_z", ("m/s", [], None)),
+    )
+
+    extra_union_fields = (
+        ("kg", "particle_mass"),
+        ("C", "particle_charge"),
+        ("", "particle_ones"),
+    )
+
+    def setup_particle_fields(self, ptype):
+
+        def get_mass(field, data):
+            species_mass = data.ds.index.parameters[ptype + '_mass']
+            return data["particle_weight"]*species_mass
+
+        self.add_field((ptype, "particle_mass"), sampling_type="particle",
+                       function=get_mass,
+                       units="kg")
+
+        def get_charge(field, data):
+            species_charge = data.ds.index.parameters[ptype + '_charge']
+            return data["particle_weight"]*species_charge
+
+        self.add_field((ptype, "particle_charge"), sampling_type="particle",
+                       function=get_charge,
+                       units="C")
+
+        super(WarpXFieldInfo, self).setup_particle_fields(ptype)
+
+
+class NyxFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    known_particle_fields = (
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+    )
+
+
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),

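The derived particle_mass and particle_charge fields above follow the usual
particle-in-cell convention: each macroparticle stands in for many physical
particles, so its mass and charge are its weight times the per-species values
that WarpXHierarchy reads from WarpXHeader. In plain numpy, with hypothetical
numbers:

>>> import numpy as np
>>> species_mass = 9.109e-31               # kg; per-species WarpXHeader value
>>> weight = np.array([1.0e8, 2.0e8])      # hypothetical macroparticle weights
>>> particle_mass = weight * species_mass  # what get_mass computes, sans units
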
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -1,5 +1,5 @@
 """
-Orion data-file handling functions
+Boxlib data-file handling functions
 
 
 
@@ -81,6 +81,85 @@
                         local_offset += size
         return data
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        chunks = list(chunks)
+        unions = self.ds.particle_unions
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    if ftype in unions:
+                        for subtype in unions[ftype]:
+                            data = self._read_particles(grid, selector,
+                                                        subtype, fname)
+                            rv[ftype, fname] = np.concatenate((data,
+                                                               rv[ftype, fname]))
+                    else:
+                        data = self._read_particles(grid, selector,
+                                                    ftype, fname)
+                        rv[ftype, fname] = np.concatenate((data,
+                                                           rv[ftype, fname]))
+        return rv
+
+    def _read_particles(self, grid, selector, ftype, name):
+
+        npart = grid._pdata[ftype]["NumberOfParticles"]
+        if npart == 0:
+            return np.array([])
+
+        fn = grid._pdata[ftype]["particle_filename"]
+        offset = grid._pdata[ftype]["offset"]
+        pheader = self.ds.index.particle_headers[ftype]
+        
+        # handle the case that this is an integer field
+        int_fnames = [fname for _, fname in pheader.known_int_fields]
+        if name in int_fnames:
+            ind = int_fnames.index(name)
+            with open(fn, "rb") as f:
+
+                # read in the position fields for selection
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+                
+                # read in the data we want
+                f.seek(offset)
+                idata = np.fromfile(f, pheader.int_type, pheader.num_int * npart)
+                data = np.asarray(idata[ind::pheader.num_int], dtype=np.float64)
+                return data[mask].flatten()
+
+        # handle case that this is a real field
+        real_fnames = [fname for _, fname in pheader.known_real_fields]
+        if name in real_fnames:
+            ind = real_fnames.index(name)
+            with open(fn, "rb") as f:
+
+                # read in the position fields for selection
+                f.seek(offset + 
+                       pheader.particle_int_dtype.itemsize * npart)
+                rdata = np.fromfile(f, pheader.real_type, pheader.num_real * npart)
+                x = np.asarray(rdata[0::pheader.num_real], dtype=np.float64)
+                y = np.asarray(rdata[1::pheader.num_real], dtype=np.float64)
+                z = np.asarray(rdata[2::pheader.num_real], dtype=np.float64)
+                mask = selector.select_points(x, y, z, 0.0)
+
+                if mask is None:
+                    return np.array([])
+
+                data = np.asarray(rdata[ind::pheader.num_real], dtype=np.float64)
+                return data[mask].flatten()
+
+
 class IOHandlerOrion(IOHandlerBoxlib):
     _dataset_type = "orion_native"
 

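The strided reads in _read_particles above assume fixed-size records in the
particle data files: an integer block of num_int values per particle
(checkpoints only), followed by a real block of num_real values per particle
with the positions first. Extracting one component is then a stride over the
flattened real block; a sketch with two hypothetical particles and
num_real = 4:

>>> import numpy as np
>>> num_real = 4                        # x, y, z plus one extra real field
>>> rdata = np.array([0.1, 0.2, 0.3, 5.0,   # particle 1: x, y, z, extra
...                   0.4, 0.5, 0.6, 7.0])  # particle 2
>>> rdata[0::num_real]                  # all x positions
array([0.1, 0.4])
>>> rdata[3::num_real]                  # the extra field, one per particle
array([5., 7.])
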
diff -r 2cb4a8cf44e7c5034d36d9ba18604288d0206603 -r b312eec01230619020e34e16ae56848ac3883a45 yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Orion frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    units_override_check
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.boxlib.api import OrionDataset
-
-# We don't do anything needing ghost zone generation right now, because these
-# are non-periodic datasets.
-_fields = ("temperature", "density", "velocity_magnitude")
-
-radadvect = "RadAdvect/plt00000"
- at requires_ds(radadvect)
-def test_radadvect():
-    ds = data_dir_load(radadvect)
-    yield assert_equal, str(ds), "plt00000"
-    for test in small_patch_amr(ds, _fields):
-        test_radadvect.__name__ = test.description
-        yield test
-
-rt = "RadTube/plt00500"
- at requires_ds(rt)
-def test_radtube():
-    ds = data_dir_load(rt)
-    yield assert_equal, str(ds), "plt00500"
-    for test in small_patch_amr(ds, _fields):
-        test_radtube.__name__ = test.description
-        yield test
-
-star = "StarParticles/plrd01000"
- at requires_ds(star)
-def test_star():
-    ds = data_dir_load(star)
-    yield assert_equal, str(ds), "plrd01000"
-    for test in small_patch_amr(ds, _fields):
-        test_star.__name__ = test.description
-        yield test
-
- at requires_file(rt)
-def test_OrionDataset():
-    assert isinstance(data_dir_load(rt), OrionDataset)
-
- at requires_file(rt)
-def test_units_override():
-    for test in units_override_check(rt):
-        yield test
-

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/12ab7e374fcb/
Changeset:   12ab7e374fcb
Branch:      yt
User:        ngoldbaum
Date:        2017-01-31 18:41:53+00:00
Summary:     Merged in krafczyk/yt (pull request #2492)

Correct coordinate problem when 'center' is used in plots.
Affected #:  1 file

diff -r dc439e4d8dbb93b261ebc8c064572523311d56ce -r 12ab7e374fcbdd45e56622c9bae43cd9dd334b2c yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -152,11 +152,11 @@
 
         # We need a special case for when we are only given one coordinate.
         if ccoord.shape == (2,):
-            return ((ccoord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
-                    (ccoord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0]-x0)/(x1-x0), 1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1]-y0)/(y1-y0), 1.0)*(yy1-yy0) + yy0)
         else:
-            return ((ccoord[0][:]-x0)/(x1-x0)*(xx1-xx0) + xx0,
-                    (ccoord[1][:]-y0)/(y1-y0)*(yy1-yy0) + yy0)
+            return (np.mod((ccoord[0][:]-x0)/(x1-x0), 1.0)*(xx1-xx0) + xx0,
+                    np.mod((ccoord[1][:]-y0)/(y1-y0), 1.0)*(yy1-yy0) + yy0)
 
     def sanitize_coord_system(self, plot, coord, coord_system):
         """
@@ -1480,22 +1480,21 @@
 
         # Set up scales for pixel size and original data
         pixel_scale = self.pixel_scale(plot)[0]
-        data_scale = data.ds.length_unit
-        units = data_scale.units
+        units = plot.xlim[0].units
 
         # Convert halo positions to code units of the plotted data
         # and then to units of the plotted window
-        px = halo_data[field_x][:].in_units(units) / data_scale
-        py = halo_data[field_y][:].in_units(units) / data_scale
+        px = halo_data[field_x][:].in_units(units)
+        py = halo_data[field_y][:].in_units(units)
+
         px, py = self.convert_to_plot(plot,[px,py])
 
         # Convert halo radii to a radius in pixels
         radius = halo_data[self.radius_field][:].in_units(units)
-        radius = np.array(radius*pixel_scale*self.factor/data_scale)
+        radius = np.array(radius*pixel_scale*self.factor)
 
         if self.width:
-            pz = halo_data[field_z][:].in_units(units)/data_scale
-            pz = data.ds.arr(pz, 'code_length')
+            pz = halo_data[field_z][:].in_units('code_length')
             c = data.center[data.axis]
 
             # I should catch an error here if width isn't in this form

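The np.mod call wraps the normalized plot coordinates back into [0, 1), so
annotations shifted across a periodic boundary by 'center' land inside the
axes rather than off-screen. A quick numpy illustration with exactly
representable values:

>>> import numpy as np
>>> frac = np.array([-0.25, 0.125, 1.5])  # normalized coords pushed off-domain
>>> np.mod(frac, 1.0)
array([0.75 , 0.125, 0.5  ])
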
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
