[yt-svn] commit/yt: 5 new changesets

commits-noreply at bitbucket.org
Tue Dec 2 12:14:19 PST 2014


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/c3744c41bb8b/
Changeset:   c3744c41bb8b
Branch:      yt
User:        hegan
Date:        2014-11-20 21:23:39+00:00
Summary:     First pass at correcting periodic rvec calculation
Affected #:  1 file

diff -r 99dc43cd40108965e21f1668c0316a08511c92e0 -r c3744c41bb8b8ef066d830b03c71d28c5f3f5e98 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -49,14 +49,26 @@
 
 def get_periodic_rvec(data):
     coords = obtain_rvec(data)
+   # return coords
     if sum(data.ds.periodicity) == 0: return coords
     le = data.ds.domain_left_edge.in_units("code_length").d
     dw = data.ds.domain_width.in_units("code_length").d
     for i in range(coords.shape[0]):
         if not data.ds.periodicity[i]: continue
         coords[i, ...] -= le[i]
-        coords[i, ...] = np.min([np.abs(np.mod(coords[i, ...],  dw[i])),
-                                 np.abs(np.mod(coords[i, ...], -dw[i]))],
-                                 axis=0)
-        coords[i, ...] += le[i]
+        # Figure out which measure is smaller
+        mins = np.argmin([np.abs(np.mod(coords[i, ...],  dw[i])),
+                         np.abs(np.mod(coords[i, ...], -dw[i]))],
+                         axis=0)
+        temp_coords = np.mod(coords[i, ...], dw[i])
+
+        # Where the second measure is better, update the temporary coords
+        ii = np.where(mins==1)
+        temp_coords[ii] = np.mod(coords[i, ...], -dw[i])[ii]
+
+        # Put the temporary coords into the actual storage
+        coords[i, ...] = temp_coords
+
+        coords[i, ...] += le[i]
+
     return coords
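
For reference, a minimal standalone sketch (illustrative values, not part
of the commit) of the minimum-image selection this changeset implements:
for a periodic domain of width dw, keep whichever of the two wrapped
coordinates has the smaller magnitude.

    import numpy as np

    # Illustrative only: minimum-image displacement in a periodic
    # domain of width dw; the values are made up.
    dw = 10.0
    coords = np.array([-9.5, -1.0, 1.0, 9.5])
    mins = np.argmin([np.abs(np.mod(coords,  dw)),
                      np.abs(np.mod(coords, -dw))], axis=0)
    result = np.mod(coords, dw)
    result[mins == 1] = np.mod(coords, -dw)[mins == 1]
    print(result)  # [ 0.5 -1.   1.  -0.5]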


https://bitbucket.org/yt_analysis/yt/commits/46be6ac076e5/
Changeset:   46be6ac076e5
Branch:      yt
User:        hegan
Date:        2014-11-20 22:21:12+00:00
Summary:     Removed commented line
Affected #:  1 file

diff -r c3744c41bb8b8ef066d830b03c71d28c5f3f5e98 -r 46be6ac076e5a26e34a2003ede3594938c5eb85a yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -49,7 +49,6 @@
 
 def get_periodic_rvec(data):
     coords = obtain_rvec(data)
-   # return coords
     if sum(data.ds.periodicity) == 0: return coords
     le = data.ds.domain_left_edge.in_units("code_length").d
     dw = data.ds.domain_width.in_units("code_length").d


https://bitbucket.org/yt_analysis/yt/commits/d698872ff2e7/
Changeset:   d698872ff2e7
Branch:      yt
User:        hegan
Date:        2014-11-21 17:40:04+00:00
Summary:     Removed np.where call
Affected #:  1 file

diff -r 46be6ac076e5a26e34a2003ede3594938c5eb85a -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -62,7 +62,7 @@
         temp_coords = np.mod(coords[i, ...], dw[i])
 
         # Where the second measure is better, update the temporary coords
-        ii = np.where(mins==1)
+        ii = mins==1
         temp_coords[ii] = np.mod(coords[i, ...], -dw[i])[ii]
 
         # Put the temporary coords into the actual storage
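
Both forms select the same elements; the boolean mask simply avoids
building explicit index arrays. A tiny illustration with made-up values:

    import numpy as np

    mins = np.array([0, 1, 1, 0])
    vals = np.array([10., 20., 30., 40.])
    # Boolean-mask indexing and np.where-based indexing pick out
    # the same elements here.
    assert np.array_equal(vals[mins == 1], vals[np.where(mins == 1)])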


https://bitbucket.org/yt_analysis/yt/commits/3750b2bf4a68/
Changeset:   3750b2bf4a68
Branch:      yt
User:        hegan
Date:        2014-11-23 19:19:22+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  33 files

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -9,6 +9,7 @@
 yt/frontends/artio/_artio_caller.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
+yt/analysis_modules/ppv_cube/ppv_utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -16,12 +16,14 @@
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
+                Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
+                Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
@@ -38,6 +40,7 @@
                 Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
                 John Regan (john.regan at helsinki.fi)
+                Sherwood Richers (srichers at tapir.caltech.edu)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
@@ -48,10 +51,14 @@
                 Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
+                Aaron Smith (asmith at astro.as.utexas.edu)
                 Britton Smith (brittonsmith at gmail.com)
                 Geoffrey So (gsiisg at gmail.com)
                 Casey Stark (caseywstark at gmail.com)
+                Antoine Strugarek (strugarek at astro.umontreal.ca)
+                Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Benjamin Thompson (bthompson2090 at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 doc/source/analyzing/analysis_modules/_images/dsquared.png
Binary file doc/source/analyzing/analysis_modules/_images/dsquared.png has changed

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -46,17 +46,23 @@
 .. code:: python
 
     import yt
+    #yt.enable_parallelism() # If you want to run in parallel this should go here!
     from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
+.. note::
+
+    For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
+    the import of the ``photon_simulator`` module, as shown above.
+
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
     ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
-                 parameters={"time_unit":(1.0,"Myr"),
-                            "length_unit":(1.0,"Mpc"),
-                            "mass_unit":(1.0e14,"Msun")}) 
+                 units_override={"time_unit":(1.0,"Myr"),
+                                 "length_unit":(1.0,"Mpc"),
+                                 "mass_unit":(1.0e14,"Msun")})
 
 First, to get a sense of what the resulting image will look like, let's
 make a new yt field called ``"density_squared"``, since the X-ray
@@ -67,7 +73,7 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("density_squared", function=_density_squared)
+    ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")
 
 Then we'll project this field along the z-axis.
 
@@ -141,7 +147,8 @@
 
 .. code:: python
 
-    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
+                                       photons_per_chunk=100000000)
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -150,6 +157,14 @@
 assume that is the name of the metallicity field (which may be spatially
 varying).
 
+The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source
+when generating photons, which reduces memory usage and makes parallelization
+more efficient. For each chunk, memory is set aside for the photon energies
+that will be generated. The optional keyword argument ``photons_per_chunk``
+controls the size of this array. For large numbers of photons you may need to
+set this parameter higher; if you are looking to decrease memory usage, you
+can set it lower.
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -294,11 +309,11 @@
             187.49897546,  187.47307048]) degree, 
      'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
             12.33327653,  12.32955225]) degree, 
-     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]) (dimensionless), 
+     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633]) (dimensionless)}
+     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633])}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -368,14 +383,23 @@
    files in subtle ways that we haven't been able to identify. Please email
    jzuhone at gmail.com if you find any bugs!
 
-Two ``EventList`` instances can be joined togther like this:
+Two ``EventList`` instances can be added together, which is useful if they were
+created using different data sources:
 
 .. code:: python
 
-    events3 = EventList.join_events(events1, events2)
+    events3 = events1+events2
 
-**WARNING**: This doesn't check for parameter consistency between the
-two lists!
+.. warning:: This only works if the two event lists were generated using
+    the same parameters!
+
+Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``,
+defined by a ds9 region (this functionality requires the
+`pyregion <http://pyregion.readthedocs.org>`_ package to be installed):
+
+.. code:: python
+
+    circle_events = events.filter_events("circle.reg")
 
 Creating an X-ray observation from an in-memory dataset
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -434,11 +458,11 @@
 .. code:: python
 
    data = {}
-   data["density"] = dens
-   data["temperature"] = temp
-   data["velocity_x"] = np.zeros(ddims)
-   data["velocity_y"] = np.zeros(ddims)
-   data["velocity_z"] = np.zeros(ddims)
+   data["density"] = (dens, "g/cm**3")
+   data["temperature"] = (temp, "K")
+   data["velocity_x"] = (np.zeros(ddims), "cm/s")
+   data["velocity_y"] = (np.zeros(ddims), "cm/s")
+   data["velocity_z"] = (np.zeros(ddims), "cm/s")
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
@@ -451,7 +475,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere("c", (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 doc/source/analyzing/mesh_filter.ipynb
--- a/doc/source/analyzing/mesh_filter.ipynb
+++ b/doc/source/analyzing/mesh_filter.ipynb
@@ -10,7 +10,7 @@
    "name": "python2"
   },
   "name": "",
-  "signature": "sha256:512c0d425416f92fcb8a4049f521b2960ed3b77873c7e8af73b16cf6e31ee266"
+  "signature": "sha256:e7a3de4de9be6a2c653bc45ed2c7ef5fa19da15aafca165d331a3125befe4d6c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -51,6 +51,9 @@
       "ad = ds.all_data()\n",
       "hot_ad = ad.cut_region([\"obj['temperature'] > 1e6\"])\n",
       "dense_ad = ad.cut_region(['obj[\"density\"] > 5e-30'])\n",
+      "\n",
+      "# you can chain cut regions in two ways:\n",
+      "dense_and_cool_ad = dense_ad.cut_region([\"obj['temperature'] < 1e5\"])\n",
       "overpressure_and_fast_ad = ad.cut_region(['(obj[\"pressure\"] > 1e-14) & (obj[\"velocity_magnitude\"].in_units(\"km/s\") > 1e2)'])"
      ],
      "language": "python",
@@ -68,8 +71,19 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print 'Temperature of all data: ad[\"temperature\"] = \\n%s' % ad[\"temperature\"] \n",
-      "print 'Temperature of \"hot\" data: hot_ad[\"temperature\"] = \\n%s' % hot_ad['temperature']"
+      "print \"Temperature of all cells:\\n ad['temperature'] = \\n%s\\n\" % ad[\"temperature\"] \n",
+      "print \"Temperatures of all \\\"hot\\\" cells:\\n hot_ad['temperature'] = \\n%s\" % hot_ad['temperature']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print \"Density of dense, cool material:\\n dense_and_cool_ad['density'] = \\n%s\\n\" % dense_and_cool_ad['density']\n",
+      "print \"Temperature of dense, cool material:\\n dense_and_cool_ad['temperature'] = \\n%s\" % dense_and_cool_ad['temperature']"
      ],
      "language": "python",
      "metadata": {},
@@ -79,18 +93,52 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "However, now we can use this cut_region object as a data source in generated Projections or Profiles or any other number of tasks.  Let's look at a density projection of the densest material, or the material which is overpressure and hot.\n",
+      "Now that we've constructed a `cut_region`, we can use it as a data source for further analysis. To create a plot based on a `cut_region`, use the `data_source` keyword argument provided by yt's plotting objects.\n",
       "\n",
-      "We can visualize this material using the `data_source` keyword argument to the initializers of yt's plotting classes."
+      "Here's an example using projections:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "proj = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\")\n",
-      "proj.annotate_title('All Data, No Cuts')\n",
-      "proj.show()"
+      "proj1 = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\")\n",
+      "proj1.annotate_title('No Cuts')\n",
+      "proj1.set_figure_size(5)\n",
+      "proj1.show()\n",
+      "\n",
+      "proj2 = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\", data_source=hot_ad)\n",
+      "proj2.annotate_title('Hot Gas')\n",
+      "proj2.set_zlim(\"density\", 3e-31, 3e-27)\n",
+      "proj2.set_figure_size(5)\n",
+      "proj2.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `data_source` keyword argument is also accepted by `SlicePlot`, `ProfilePlot` and `PhasePlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc1 = yt.SlicePlot(ds, 'x', \"density\", center='m')\n",
+      "slc1.set_zlim('density', 3e-31, 3e-27)\n",
+      "slc1.annotate_title('No Cuts')\n",
+      "slc1.set_figure_size(5)\n",
+      "slc1.show()\n",
+      "\n",
+      "slc2 = yt.SlicePlot(ds, 'x', \"density\", center='m', data_source=dense_ad)\n",
+      "slc2.set_zlim('density', 3e-31, 3e-27)\n",
+      "slc2.annotate_title('Dense Gas')\n",
+      "slc2.set_figure_size(5)\n",
+      "slc2.show()"
      ],
      "language": "python",
      "metadata": {},
@@ -100,82 +148,17 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "proj = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\", data_source=dense_ad)\n",
-      "proj.annotate_title('Dense Material')\n",
-      "proj.set_zlim(\"density\", 3e-31, 3e-27)\n",
-      "proj.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "proj = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\", data_source=overpressure_and_fast_ad)\n",
-      "proj.annotate_title('Overpressured and Fast Material')\n",
-      "proj.set_zlim(\"density\", 3e-31, 3e-27)\n",
-      "proj.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `data_source` keyword argument is also accepted by `SlicePlot` and `PhasePlot`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = yt.SlicePlot(ds, 'x', \"density\")\n",
-      "slc.set_zlim('density', 3e-31, 3e-27)\n",
-      "slc.annotate_title('No Cuts')\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = yt.SlicePlot(ds, 'x', \"density\", data_source=dense_ad)\n",
-      "slc.set_zlim('density', 3e-31, 3e-27)\n",
-      "slc.annotate_title('Dense Material')\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ph = yt.PhasePlot(ad, 'density', 'temperature', 'cell_mass', weight_field=None)\n",
-      "ph.set_xlim(3e-31, 3e-27)\n",
-      "ph.set_title('cell_mass', 'No Cuts')\n",
-      "ph.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ph = yt.PhasePlot(dense_ad, 'density', 'temperature', 'cell_mass', weight_field=None)\n",
-      "ph.set_xlim(3e-31, 3e-27)\n",
-      "ph.set_title('cell_mass', 'Dense material')\n",
-      "ph.show()"
+      "ph1 = yt.PhasePlot(ad, 'density', 'temperature', 'cell_mass', weight_field=None)\n",
+      "ph1.set_xlim(3e-31, 3e-27)\n",
+      "ph1.set_title('cell_mass', 'No Cuts')\n",
+      "ph1.set_figure_size(5)\n",
+      "ph1.show()\n",
+      "\n",
+      "ph1 = yt.PhasePlot(dense_ad, 'density', 'temperature', 'cell_mass', weight_field=None)\n",
+      "ph1.set_xlim(3e-31, 3e-27)\n",
+      "ph1.set_title('cell_mass', 'Dense Gas')\n",
+      "ph1.set_figure_size(5)\n",
+      "ph1.show()"
      ],
      "language": "python",
      "metadata": {},

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 doc/source/analyzing/units/unit_equivalencies.rst
--- a/doc/source/analyzing/units/unit_equivalencies.rst
+++ b/doc/source/analyzing/units/unit_equivalencies.rst
@@ -1,7 +1,7 @@
-.. _symbolic_units:
+.. _unit_equivalencies:
 
-Symbolic units: :code:`yt.units`
-================================
+Unit Equivalencies
+==================
 
 .. notebook:: 6)_Unit_Equivalencies.ipynb
    :skip_exceptions:

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -131,6 +131,22 @@
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
+the ``join_vtk`` script), and may benefit from being subdivided into "virtual grids". For this purpose,
+one can pass in the ``nprocs`` parameter:
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("sloshing.0000.vtk", nprocs=8)
+
+which will subdivide each original grid into ``nprocs`` grids.
+
+.. note::
+
+    Virtual grids are only supported (and really only necessary) for 3D data.
+
 Alternative values for the following simulation parameters may be specified using a ``parameters``
 dict, accepting the following keys:
 

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
--- a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+++ b/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
@@ -15,6 +15,7 @@
            ("halos", "particle_mass"))
 
 methods = {"fof": 2, "hop": 2, "rockstar": 3}
+decimals = {"fof": 10, "hop": 10, "rockstar": 1}
 
 e64 = "Enzo_64/DD0043/data0043"
 @requires_ds(e64, big_data=True)
@@ -28,9 +29,10 @@
                                    maxprocs=methods[method])
         comm.Disconnect()
 
-    fn = os.path.join(os.path.dirname(__file__),
-                      "halo_catalogs", method,
-                      "%s.0.h5" % method)
-    ds = load(fn)
-    for field in _fields:
-        yield FieldValuesTest(ds, field, particle_type=True)
+        fn = os.path.join(os.path.dirname(__file__),
+                          "halo_catalogs", method,
+                          "%s.0.h5" % method)
+        ds = load(fn)
+        for field in _fields:
+            yield FieldValuesTest(ds, field, particle_type=True,
+                                  decimals=decimals[method])

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/halo_finding/tests/test_rockstar.py
--- a/yt/analysis_modules/halo_finding/tests/test_rockstar.py
+++ b/yt/analysis_modules/halo_finding/tests/test_rockstar.py
@@ -25,8 +25,8 @@
     h1 = "rockstar_halos/halos_0.0.bin"
     d1 = load(h1)
     for field in _fields:
-        yield FieldValuesTest(d1, field, particle_type=True)
+        yield FieldValuesTest(d1, field, particle_type=True, decimals=1)
     h2 = "rockstar_halos/halos_1.0.bin"
     d2 = load(h2)
     for field in _fields:
-        yield FieldValuesTest(d2, field, particle_type=True)
+        yield FieldValuesTest(d2, field, particle_type=True, decimals=1)

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -98,11 +98,16 @@
 
         self.add_info_item("total_cells")
         self.add_info_item("cell_mass")
-        self.add_info_item("mass_weighted_jeans_mass")
-        self.add_info_item("volume_weighted_jeans_mass")
+
+        if any("jeans" in f for f in self.data.pf.field_list):
+            self.add_info_item("mass_weighted_jeans_mass")
+            self.add_info_item("volume_weighted_jeans_mass")
+
         self.add_info_item("max_grid_level")
-        self.add_info_item("min_number_density")
-        self.add_info_item("max_number_density")
+
+        if any("number_density" in f for f in self.data.pf.field_list):
+            self.add_info_item("min_number_density")
+            self.add_info_item("max_number_density")
 
     def clear_clump_info(self):
         """

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -26,12 +26,12 @@
 from yt.funcs import *
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system
-from yt import units
+     communication_system, parallel_objects
+from yt.units.yt_array import uconcatenate
 
-N_TBIN = 10000
-TMIN = 8.08e-2
-TMAX = 50.
+n_kT = 10000
+kT_min = 8.08e-2
+kT_max = 50.
 
 comm = communication_system.communicators[-1]
 
@@ -59,11 +59,15 @@
     Zmet : float or string, optional
         The metallicity. If a float, assumes a constant metallicity throughout.
         If a string, is taken to be the name of the metallicity field.
+    photons_per_chunk : integer
+        The maximum number of photons that are allocated per chunk. Increase or decrease
+        as needed.
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
+        self.photons_per_chunk = photons_per_chunk
 
     def __call__(self, data_source, parameters):
         
@@ -74,130 +78,160 @@
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
         dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
-                
+        src_ctr = parameters["center"]
+
         vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
 
-        num_cells = data_source["temperature"].shape[0]
-        start_c = comm.rank*num_cells/comm.size
-        end_c = (comm.rank+1)*num_cells/comm.size
-        
-        kT = (kboltz*data_source["temperature"][start_c:end_c]).in_units("keV").to_ndarray()
-        vol = data_source["cell_volume"][start_c:end_c].in_cgs().to_ndarray()
-        EM = (data_source["density"][start_c:end_c]/mp).to_ndarray()**2
-        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        data_source.clear_data()
-    
-        x = data_source["x"][start_c:end_c].copy()
-        y = data_source["y"][start_c:end_c].copy()
-        z = data_source["z"][start_c:end_c].copy()
-        dx = data_source["dx"][start_c:end_c].copy()
-
-        data_source.clear_data()
-        
-        vx = data_source["velocity_x"][start_c:end_c].copy()
-        vy = data_source["velocity_y"][start_c:end_c].copy()
-        vz = data_source["velocity_z"][start_c:end_c].copy()
-    
-        if isinstance(self.Zmet, basestring):
-            metalZ = data_source[self.Zmet][start_c:end_c].to_ndarray()
-        else:
-            metalZ = self.Zmet*np.ones(EM.shape)
-        
-        data_source.clear_data()
-
-        idxs = np.argsort(kT)
-        dshape = idxs.shape
-
-        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
-        dkT = kT_bins[1]-kT_bins[0]
-        kT_idxs = np.digitize(kT[idxs], kT_bins)
-        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
-        bcounts = np.bincount(kT_idxs).astype("int")
-        bcounts = bcounts[bcounts > 0]
-        n = int(0)
-        bcell = []
-        ecell = []
-        for bcount in bcounts:
-            bcell.append(n)
-            ecell.append(n+bcount)
-            n += bcount
-        kT_idxs = np.unique(kT_idxs)
-        
         self.spectral_model.prepare()
         energy = self.spectral_model.ebins
-    
-        cell_em = EM[idxs]*vol_scale
-    
-        number_of_photons = np.zeros(dshape, dtype='uint64')
-        energies = []
-    
-        u = np.random.random(cell_em.shape)
-        
-        pbar = get_pbar("Generating Photons", dshape[0])
 
-        for i, ikT in enumerate(kT_idxs):
+        citer = data_source.chunks(["kT","cell_volume","density",
+                                    "x","y","z","dx","velocity_x",
+                                    "velocity_y","velocity_z"], "io")
 
-            ibegin = bcell[i]
-            iend = ecell[i]
-            kT = kT_bins[ikT] + 0.5*dkT
-        
-            em_sum_c = cell_em[ibegin:iend].sum()
-            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+        photons = {}
+        photons["x"] = []
+        photons["y"] = []
+        photons["z"] = []
+        photons["vx"] = []
+        photons["vy"] = []
+        photons["vz"] = []
+        photons["dx"] = []
+        photons["Energy"] = []
+        photons["NumberOfPhotons"] = []
 
-            cspec, mspec = self.spectral_model.get_spectrum(kT)
-            cspec *= dist_fac*em_sum_c/vol_scale
-            mspec *= dist_fac*em_sum_m/vol_scale
+        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
 
-            cumspec_c = np.cumsum(cspec.ndarray_view())
-            counts_c = cumspec_c[:]/cumspec_c[-1]
-            counts_c = np.insert(counts_c, 0, 0.0)
-            tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
+        for chunk in parallel_objects(citer):
 
-            cumspec_m = np.cumsum(mspec.ndarray_view())
-            counts_m = cumspec_m[:]/cumspec_m[-1]
-            counts_m = np.insert(counts_m, 0, 0.0)
-            tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
+            kT = chunk["kT"].v
+            num_cells = len(kT)
+            if num_cells == 0:
+                continue
+            vol = chunk["cell_volume"].in_cgs().v
+            EM = (chunk["density"]/mp).v**2
+            EM *= 0.5*(1.+self.X_H)*self.X_H*vol
 
-            for icell in xrange(ibegin, iend):
+            if isinstance(self.Zmet, basestring):
+                metalZ = chunk[self.Zmet].v
+            else:
+                metalZ = self.Zmet
+
+            idxs = np.argsort(kT)
+
+            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            dkT = kT_bins[1]-kT_bins[0]
+            kT_idxs = np.digitize(kT[idxs], kT_bins)
+            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
+            bcounts = np.bincount(kT_idxs).astype("int")
+            bcounts = bcounts[bcounts > 0]
+            n = int(0)
+            bcell = []
+            ecell = []
+            for bcount in bcounts:
+                bcell.append(n)
+                ecell.append(n+bcount)
+                n += bcount
+            kT_idxs = np.unique(kT_idxs)
+
+            cell_em = EM[idxs]*vol_scale
+
+            number_of_photons = np.zeros(num_cells, dtype="uint64")
+            energies = np.zeros(self.photons_per_chunk)
+
+            start_e = 0
+            end_e = 0
+
+            pbar = get_pbar("Generating photons for chunk ", num_cells)
+
+            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
+
+                kT = kT_bins[ikT] + 0.5*dkT
+
+                n_current = iend-ibegin
+
+                cem = cell_em[ibegin:iend]
+
+                em_sum_c = cem.sum()
+                if isinstance(self.Zmet, basestring):
+                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
+                else:
+                    em_sum_m = metalZ*em_sum_c
+
+                cspec, mspec = self.spectral_model.get_spectrum(kT)
+
+                cumspec_c = np.cumsum(cspec.d)
+                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
+                cumspec_c /= cumspec_c[-1]
+                cumspec_c = np.insert(cumspec_c, 0, 0.0)
+
+                cumspec_m = np.cumsum(mspec.d)
+                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
+                cumspec_m /= cumspec_m[-1]
+                cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+                u = np.random.random(size=n_current)
+
+                cell_norm_c = tot_ph_c*cem/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
             
-                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+                if isinstance(self.Zmet, basestring):
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
+                else:
+                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
             
-                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+
+                end_e += int((cell_n_c+cell_n_m).sum())
+
+                if end_e > self.photons_per_chunk:
+                    raise RuntimeError("Number of photons generated for this chunk "+
+                                       "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
+                                       "Increase photons_per_chunk!")
+
+                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
             
-                cell_n = cell_n_c + cell_n_m
+                start_e = end_e
 
-                if cell_n > 0:
-                    number_of_photons[icell] = cell_n
-                    randvec_c = np.random.uniform(size=cell_n_c)
-                    randvec_c.sort()
-                    randvec_m = np.random.uniform(size=cell_n_m)
-                    randvec_m.sort()
-                    cell_e_c = np.interp(randvec_c, counts_c, energy)
-                    cell_e_m = np.interp(randvec_m, counts_m, energy)
-                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
-                
-                pbar.update(icell)
+                pbar.update(iend)
 
-        pbar.finish()
-            
-        active_cells = number_of_photons > 0
-        idxs = idxs[active_cells]
-        
-        photons = {}
+            pbar.finish()
 
-        src_ctr = parameters["center"]
-        
-        photons["x"] = (x[idxs]-src_ctr[0]).in_units("kpc")
-        photons["y"] = (y[idxs]-src_ctr[1]).in_units("kpc")
-        photons["z"] = (z[idxs]-src_ctr[2]).in_units("kpc")
-        photons["vx"] = vx[idxs].in_units("km/s")
-        photons["vy"] = vy[idxs].in_units("km/s")
-        photons["vz"] = vz[idxs].in_units("km/s")
-        photons["dx"] = dx[idxs].in_units("kpc")
-        photons["NumberOfPhotons"] = number_of_photons[active_cells]
-        photons["Energy"] = np.concatenate(energies)*units.keV
-    
+            active_cells = number_of_photons > 0
+            idxs = idxs[active_cells]
+
+            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
+            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
+
+            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
+            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
+            photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
+            photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
+            photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
+            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
+            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
+            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
+            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
+
+        for key in photons:
+            photons[key] = uconcatenate(photons[key])
+
         return photons
+
+def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
+    energies = np.array([])
+    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
+        if cn_c > 0:
+            randvec_c = np.random.uniform(size=cn_c)
+            randvec_c.sort()
+            cell_e_c = np.interp(randvec_c, counts_c, energy)
+            energies = np.append(energies, cell_e_c)
+        if cn_m > 0: 
+            randvec_m = np.random.uniform(size=cn_m)
+            randvec_m.sort()
+            cell_e_m = np.interp(randvec_m, counts_m, energy)
+            energies = np.append(energies, cell_e_m)
+    return energies
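
The helper _generate_energies above draws photon energies by inverse-CDF
sampling: sorted uniform deviates are mapped through the normalized
cumulative spectrum onto the energy bin edges. A compact sketch of that
idea, using an invented spectrum:

    import numpy as np

    # Sketch of the inverse-CDF sampling in _generate_energies; the
    # spectrum below is invented purely for illustration.
    energy = np.linspace(0.1, 10.0, 101)      # bin edges (keV)
    spec = np.exp(-energy[:-1])               # fake per-bin spectrum
    cumspec = np.cumsum(spec)
    cumspec /= cumspec[-1]                    # normalize to [0, 1]
    cumspec = np.insert(cumspec, 0, 0.0)      # align with bin edges
    u = np.sort(np.random.uniform(size=1000))
    energies = np.interp(u, cumspec, energy)  # sampled energies (keV)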

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -28,8 +28,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
-from yt import units
-from yt.units.yt_array import YTQuantity
+from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy
 pyfits = _astropy.pyfits
@@ -81,7 +80,10 @@
                     for i in xrange(self.num_cells)]
         else:
             return self.photons[key]
-    
+
+    def __repr__(self):
+        return self.photons.__repr__()
+
     @classmethod
     def from_file(cls, filename):
         r"""
@@ -93,12 +95,12 @@
         
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = f["/fid_exp_time"].value*units.s
-        parameters["FiducialArea"] = f["/fid_area"].value*units.cm*units.cm
+        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
         parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = f["/fid_d_a"].value*units.Mpc
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
         parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = f["/width"].value*units.kpc
+        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
         parameters["HubbleConstant"] = f["/hubble"].value
         parameters["OmegaMatter"] = f["/omega_matter"].value
         parameters["OmegaLambda"] = f["/omega_lambda"].value
@@ -107,13 +109,13 @@
         start_c = comm.rank*num_cells/comm.size
         end_c = (comm.rank+1)*num_cells/comm.size
         
-        photons["x"] = f["/x"][start_c:end_c]*units.kpc
-        photons["y"] = f["/y"][start_c:end_c]*units.kpc
-        photons["z"] = f["/z"][start_c:end_c]*units.kpc
-        photons["dx"] = f["/dx"][start_c:end_c]*units.kpc
-        photons["vx"] = f["/vx"][start_c:end_c]*units.km/units.s
-        photons["vy"] = f["/vy"][start_c:end_c]*units.km/units.s
-        photons["vz"] = f["/vz"][start_c:end_c]*units.km/units.s
+        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
 
         n_ph = f["/num_photons"][:]
         
@@ -128,7 +130,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
         
-        photons["Energy"] = f["/energy"][start_e:end_e]*units.keV
+        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
         
         f.close()
 
@@ -193,15 +195,15 @@
         determine them, the *photons* dict needs to have the following items, corresponding
         to cells which have photons:
 
-        "x" : the x-position of the cell relative to the source center in kpc, NumPy array of floats
-        "y" : the y-position of the cell relative to the source center in kpc, NumPy array of floats
-        "z" : the z-position of the cell relative to the source center in kpc, NumPy array of floats
-        "vx" : the x-velocity of the cell in km/s, NumPy array of floats
-        "vy" : the y-velocity of the cell in km/s, NumPy array of floats
-        "vz" : the z-velocity of the cell in km/s, NumPy array of floats
-        "dx" : the width of the cell in kpc, NumPy array of floats
-        "NumberOfPhotons" : the number of photons in the cell, NumPy array of integers
-        "Energy" : the source rest-frame energies of the photons, NumPy array of floats
+        "x" : the x-position of the cell relative to the source center in kpc, YTArray
+        "y" : the y-position of the cell relative to the source center in kpc, YTArray
+        "z" : the z-position of the cell relative to the source center in kpc, YTArray
+        "vx" : the x-velocity of the cell in km/s, YTArray
+        "vy" : the y-velocity of the cell in km/s, YTArray
+        "vz" : the z-velocity of the cell in km/s, YTArray
+        "dx" : the width of the cell in kpc, YTArray
+        "NumberOfPhotons" : the number of photons in the cell, NumPy array of unsigned 64-bit integers
+        "Energy" : the source rest-frame energies of the photons, YTArray
 
         The last array is not the same size as the others because it contains the energies in all of
         the cells in a single 1-D array. The first photons["NumberOfPhotons"][0] elements are
@@ -211,7 +213,9 @@
         spectrum of photons is created. More complicated examples which actually
         create photons based on the fields in the dataset could be created. 
 
-        >>> from scipy.stats import powerlaw
+        >>> import numpy as np
+        >>> import yt
+        >>> from yt.analysis_modules.photon_simulator import *
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -219,18 +223,19 @@
         ...     num_photons = parameters["num_photons"]
         ...     E0  = parameters["line_energy"] # Energies are in keV
         ...     sigE = parameters["line_sigma"] 
+        ...     src_ctr = parameters["center"]
         ...
         ...     energies = norm.rvs(loc=E0, scale=sigE, size=num_photons)
-        ...     
-        ...     photons["x"] = np.zeros((1)) # Place everything in the center cell
-        ...     photons["y"] = np.zeros((1))
-        ...     photons["z"] = np.zeros((1))
-        ...     photons["vx"] = np.zeros((1))
-        ...     photons["vy"] = np.zeros((1))
-        ...     photons["vz"] = 100.*np.ones((1))
-        ...     photons["dx"] = source["dx"][0]*ds.units["kpc"]*np.ones((1)) 
-        ...     photons["NumberOfPhotons"] = num_photons*np.ones((1))
-        ...     photons["Energy"] = np.array(energies)
+        ...
+        ...     # Place everything in the center cell
+        ...     for i, ax in enumerate("xyz"):
+        ...         photons[ax] = (ds.domain_center[i]-src_ctr[i]).in_units("kpc")
+        ...     photons["vx"] = ds.arr([0], "km/s")
+        ...     photons["vy"] = ds.arr([0], "km/s")
+        ...     photons["vz"] = ds.arr([100.0], "km/s")
+        ...     photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc")
+        ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones(1), dtype="uint64")
+        ...     photons["Energy"] = ds.arr(energies, "keV")
         >>>
         >>> redshift = 0.05
         >>> area = 6000.0
@@ -238,11 +243,12 @@
         >>> parameters = {"num_photons" : 10000, "line_energy" : 5.0,
         ...               "line_sigma" : 0.1}
         >>> ddims = (128,128,128)
-        >>> random_data = {"Density":np.random.random(ddims)}
-        >>> ds = load_uniform_grid(random_data, ddims)
+        >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
+        >>> ds = yt.load_uniform_grid(random_data, ddims)
         >>> dd = ds.all_data()
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
-        ...                                         time, line_func)
+        ...                                         time, line_func,
+        ...                                         parameters=parameters)
 
         """
 
@@ -296,17 +302,19 @@
         dimension = 0
         width = 0.0
         for i, ax in enumerate("xyz"):
-            pos = data_source[ax]
-            delta = data_source["d%s"%(ax)]
-            le = np.min(pos-0.5*delta)
-            re = np.max(pos+0.5*delta)
+            le, re = data_source.quantities.extrema(ax)
+            delta_min, delta_max = data_source.quantities.extrema("d%s"%ax)
+            le -= 0.5*delta_max
+            re += 0.5*delta_max
             width = max(width, re-parameters["center"][i], parameters["center"][i]-le)
-            dimension = max(dimension, int(width/delta.min()))
+            dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
                 
         photons = photon_model(data_source, parameters)
-        
+
+        mylog.info("Finished generating photons.")
+
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
                         
@@ -316,6 +324,7 @@
         """
         Write the photons to the HDF5 file *photonfile*.
         """
+
         if parallel_capable:
             
             mpi_long = get_mpi_type("int64")
@@ -332,15 +341,15 @@
                 num_photons = sum(sizes_p)        
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
-                x = np.zeros((num_cells))
-                y = np.zeros((num_cells))
-                z = np.zeros((num_cells))
-                vx = np.zeros((num_cells))
-                vy = np.zeros((num_cells))
-                vz = np.zeros((num_cells))
-                dx = np.zeros((num_cells))
-                n_ph = np.zeros((num_cells), dtype="uint64")
-                e = np.zeros((num_photons))
+                x = np.zeros(num_cells)
+                y = np.zeros(num_cells)
+                z = np.zeros(num_cells)
+                vx = np.zeros(num_cells)
+                vy = np.zeros(num_cells)
+                vz = np.zeros(num_cells)
+                dx = np.zeros(num_cells)
+                n_ph = np.zeros(num_cells, dtype="uint64")
+                e = np.zeros(num_photons)
             else:
                 sizes_c = []
                 sizes_p = []
@@ -377,15 +386,15 @@
 
         else:
 
-            x = self.photons["x"].ndarray_view()
-            y = self.photons["y"].ndarray_view()
-            z = self.photons["z"].ndarray_view()
-            vx = self.photons["vx"].ndarray_view()
-            vy = self.photons["vy"].ndarray_view()
-            vz = self.photons["vz"].ndarray_view()
-            dx = self.photons["dx"].ndarray_view()
+            x = self.photons["x"].d
+            y = self.photons["y"].d
+            z = self.photons["z"].d
+            vx = self.photons["vx"].d
+            vy = self.photons["vy"].d
+            vz = self.photons["vz"].d
+            dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
-            e = self.photons["Energy"].ndarray_view()
+            e = self.photons["Energy"].d
                                                 
         if comm.rank == 0:
             
@@ -423,7 +432,7 @@
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False):
+                        convolve_energies=False, no_shifting=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -454,6 +463,8 @@
             The names of the ARF and/or RMF files to convolve the photons with.
         convolve_energies : boolean, optional
             If this is set, the photon energies will be convolved with the RMF.
+        no_shifting : boolean, optional
+            If set, the photon energies will not be Doppler shifted.
             
         Examples
         --------
@@ -472,7 +483,7 @@
         else:
             sky_center = YTArray(sky_center, "degree")
 
-        dx = self.photons["dx"].ndarray_view()
+        dx = self.photons["dx"].d
         nx = self.parameters["Dimension"]
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
@@ -560,12 +571,13 @@
         x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-                    
-        vz = self.photons["vx"]*z_hat[0] + \
-             self.photons["vy"]*z_hat[1] + \
-             self.photons["vz"]*z_hat[2]
-        shift = -vz.in_cgs()/clight
-        shift = np.sqrt((1.-shift)/(1.+shift))
+
+        if not no_shifting:
+            vz = self.photons["vx"]*z_hat[0] + \
+                 self.photons["vy"]*z_hat[1] + \
+                 self.photons["vz"]*z_hat[2]
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -579,8 +591,11 @@
         z *= delta
         x += self.photons["x"][obs_cells]
         y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]  
-        eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        z += self.photons["z"][obs_cells]
+        if no_shifting:
+            eobs = self.photons["Energy"][idxs]
+        else:
+            eobs = self.photons["Energy"][idxs]*shift[obs_cells]
 
         xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
         ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
@@ -611,7 +626,7 @@
         events = {}
 
         dx_min = self.parameters["Width"].value/self.parameters["Dimension"]
-        dtheta = np.rad2deg(dx_min/D_A.value)*units.degree
+        dtheta = YTQuantity(np.rad2deg(dx_min/D_A.value), "degree")
         
         events["xpix"] = xsky[detected]/dx_min + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min + 0.5*(nx+1)
@@ -625,7 +640,7 @@
         
         num_events = len(events["xpix"])
             
-        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
+        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
@@ -737,7 +752,7 @@
     
 class EventList(object) :
 
-    def __init__(self, events, parameters) :
+    def __init__(self, events, parameters):
 
         self.events = events
         self.parameters = parameters
@@ -749,8 +764,8 @@
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2                                                
         x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
-        self.events["xsky"] = x*units.degree
-        self.events["ysky"] = y*units.degree
+        self.events["xsky"] = YTArray(x, "degree")
+        self.events["ysky"] = YTArray(y, "degree")
 
     def keys(self):
         return self.events.keys()
@@ -769,6 +784,51 @@
 
     def __repr__(self):
         return self.events.__repr__()
+
+    def __add__(self, other):
+        keys1 = self.parameters.keys()
+        keys2 = other.parameters.keys()
+        keys1.sort()
+        keys2.sort()
+        if keys1 != keys2:
+            raise RuntimeError("The two EventLists do not have the same parameters!")
+        for k1, k2 in zip(keys1, keys2):
+            v1 = self.parameters[k1]
+            v2 = other.parameters[k2]
+            if isinstance(v1, basestring) or isinstance(v2, basestring):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
+                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
+        events = {}
+        for item1, item2 in zip(self.items(), other.items()):
+            k1, v1 = item1
+            k2, v2 = item2
+            events[k1] = uconcatenate([v1,v2])
+        return EventList(events, self.parameters)
+
+    def filter_events(self, region):
+        """
+        Filter events using a ds9 region. Requires the pyregion package.
+        Returns a new EventList.
+        """
+        import pyregion
+        import os
+        if os.path.exists(region):
+            reg = pyregion.open(region)
+        else:
+            reg = pyregion.parse(region)
+        r = reg.as_imagecoord(header=self.wcs.to_header())
+        f = r.get_filter()
+        idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+        if idxs.sum() == 0:
+            raise RuntimeError("No events are inside this region!")
+        new_events = {}
+        for k, v in self.events.items():
+            new_events[k] = v[idxs]
+        return EventList(new_events, self.parameters)
    
     @classmethod
     def from_h5_file(cls, h5file):
@@ -780,10 +840,10 @@
         
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = f["/exp_time"].value*units.s
-        parameters["Area"] = f["/area"].value*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = f["/d_a"].value*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
             parameters["RMF"] = f["/rmf"].value
         if "arf" in f:
@@ -799,13 +859,13 @@
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
-        events["eobs"] = f["/eobs"][:]*units.keV
+        events["eobs"] = YTArray(f["/eobs"][:], "keV")
         if "pi" in f:
             events["PI"] = f["/pi"][:]
         if "pha" in f:
             events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = f["/sky_center"][:]*units.deg
-        parameters["dtheta"] = f["/dtheta"].value*units.deg
+        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
         parameters["pix_center"] = f["/pix_center"][:]
         
         f.close()
@@ -824,10 +884,10 @@
         events = {}
         parameters = {}
         
-        parameters["ExposureTime"] = tblhdu.header["EXPOSURE"]*units.s
-        parameters["Area"] = tblhdu.header["AREA"]*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
+        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
-        parameters["AngularDiameterDistance"] = tblhdu.header["D_A"]*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:
             parameters["RMF"] = tblhdu["RMF"]
         if "ARF" in tblhdu.header:
@@ -840,12 +900,12 @@
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
             parameters["Instrument"] = tblhdu["INSTRUME"]
-        parameters["sky_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])*units.deg
+        parameters["sky_center"] = YTArray([tblhdu["TCRVL2"],tblhdu["TCRVL3"]], "deg")
         parameters["pix_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])
-        parameters["dtheta"] = tblhdu["TCRVL3"]*units.deg
+        parameters["dtheta"] = YTQuantity(tblhdu["TCRVL3"], "deg")
         events["xpix"] = tblhdu.data.field("X")
         events["ypix"] = tblhdu.data.field("Y")
-        events["eobs"] = (tblhdu.data.field("ENERGY")/1000.)*units.keV # Convert to keV
+        events["eobs"] = YTArray(tblhdu.data.field("ENERGY")/1000., "keV")
         if "PI" in tblhdu.columns.names:
             events["PI"] = tblhdu.data.field("PI")
         if "PHA" in tblhdu.columns.names:
@@ -853,19 +913,6 @@
         
         return cls(events, parameters)
 
-    @classmethod
-    def join_events(cls, events1, events2):
-        """
-        Join two sets of events, *events1* and *events2*.
-        """
-        events = {}
-        for item1, item2 in zip(events1.items(), events2.items()):
-            k1, v1 = item1
-            k2, v2 = item2
-            events[k1] = np.concatenate([v1,v2])
-        
-        return cls(events, events1.parameters)
-                
     @parallel_root_only
     def write_fits_file(self, fitsfile, clobber=False):
         """
@@ -1152,7 +1199,7 @@
 
         if energy_bins:
             spectype = "energy"
-            espec = self.events["eobs"].ndarray_view()
+            espec = self.events["eobs"].d
             range = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=range)
             bins = 0.5*(ee[1:]+ee[:-1])
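
For context: the hunks above replace arithmetic with the old `units` singletons
(e.g. `f["/exp_time"].value*units.s`) by explicit YTQuantity/YTArray
construction. A minimal sketch of the new style, with illustrative values:

    import numpy as np
    from yt.units.yt_array import YTArray, YTQuantity

    exp_time = YTQuantity(500.0, "s")          # scalar with units
    area = YTQuantity(3000.0, "cm**2")         # "cm**2" replaces units.cm*units.cm
    eobs = YTArray(np.array([0.5, 2.0, 7.1]), "keV")

    # .d (a view, like the old .ndarray_view()) strips units without copying:
    espec = eobs.d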

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -14,7 +14,7 @@
 import numpy as np
 import os
 from yt.funcs import *
-from yt import units
+from yt.units.yt_array import YTQuantity, YTArray
 import h5py
 
 try:
@@ -29,16 +29,17 @@
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
-hc = (hcgs*clight).in_units("keV*angstrom")
-cm3 = units.cm*units.cm*units.cm
+hc = (hcgs*clight).in_units("keV*angstrom").v
+cl = clight.v
+K = 1.0/np.sqrt(2.*np.pi)
 
 class SpectralModel(object):
 
     def __init__(self, emin, emax, nchan):
-        self.emin = emin*units.keV
-        self.emax = emax*units.keV
+        self.emin = YTQuantity(emin, "keV")
+        self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
+        self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -68,8 +69,9 @@
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan):
+    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
         self.model_name = model_name
+        self.thermal_broad = thermal_broad
         SpectralModel.__init__(self, emin, emax, nchan)
         
     def prepare(self):
@@ -80,27 +82,29 @@
         xspec.AllModels.setEnergies("%f %f %d lin" %
                                     (self.emin.value, self.emax.value, self.nchan))
         self.model = xspec.Model(self.model_name)
+        self.thermal_comp = getattr(self.model,self.model_name)
         if self.model_name == "bremss":
             self.norm = 3.02e-15
         else:
             self.norm = 1.0e-14
-        
+        self.thermal_comp.norm = 1.0
+        self.thermal_comp.Redshift = 0.0
+        if self.thermal_broad:
+            xspec.Xset.addModelString("APECTHERMAL","yes")
+
     def get_spectrum(self, kT):
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        m = getattr(self.model,self.model_name)
-        m.kT = kT
-        m.Abundanc = 0.0
-        m.norm = 1.0
-        m.Redshift = 0.0
+        self.thermal_comp.kT = kT
+        self.thermal_comp.Abundanc = 0.0
         cosmic_spec = self.norm*np.array(self.model.values(0))
-        m.Abundanc = 1.0
         if self.model_name == "bremss":
-            metal_spec = np.zeros((self.nchan))
+            metal_spec = np.zeros(self.nchan)
         else:
+            self.thermal_comp.Abundanc = 1.0
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
-        return cosmic_spec*cm3/units.s, metal_spec*cm3/units.s
+        return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -188,7 +192,7 @@
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.wvbins = (hc/self.ebins[::-1]).ndarray_view()
+        self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
         # Non-trace metals
@@ -230,12 +234,12 @@
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
-        ebins = self.ebins.ndarray_view()
+        ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
-            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
+            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/cl
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
                 vec += np.diff(cdf)*a
@@ -255,13 +259,13 @@
         e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
-        tmpspec += np.interp(self.emid.ndarray_view(), e_cont, continuum)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
         
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
         e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
         
-        tmpspec += np.interp(self.emid.ndarray_view(), e_pseudo, pseudo)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
         
         return tmpspec
 
@@ -275,7 +279,7 @@
         mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
-            return cspec_l*cm3/units.s, mspec_l*cm3/units.s
+            return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s")
         dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
@@ -285,8 +289,8 @@
         for elem in self.metal_elem:
             mspec_l += self._make_spectrum(elem, tindex+2)
             mspec_r += self._make_spectrum(elem, tindex+3)
-        cosmic_spec = (cspec_l*(1.-dT)+cspec_r*dT)*cm3/units.s
-        metal_spec = (mspec_l*(1.-dT)+mspec_r*dT)*cm3/units.s
+        cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
+        metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
         return cosmic_spec, metal_spec
 
 class TableAbsorbModel(SpectralModel):
@@ -313,11 +317,11 @@
         f = h5py.File(self.filename,"r")
         emin = f["energy"][:].min()
         emax = f["energy"][:].max()
-        self.sigma = f["cross_section"][:]*units.cm*units.cm
+        self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.nH = nH*1.0e22/(units.cm*units.cm)
+        self.nH = YTQuantity(nH*1.0e22, "cm**-2")
         
     def prepare(self):
         """

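Aside: in `_make_spectrum` above, each line is Gaussian-broadened with width
sigma = E0*sqrt(kT/(A*m_u*c**2)) and binned by differencing the normal CDF
at the bin edges. That binning step in isolation (values illustrative):

    import numpy as np
    from scipy import stats

    ebins = np.linspace(0.05, 50.0, 1001)   # keV bin edges
    E0, sigma, amp = 6.7, 0.005, 1.0e-17    # line center, width (keV), amplitude

    # Fraction of the Gaussian landing in each bin = CDF differences
    cdf = stats.norm(E0, sigma).cdf(ebins)
    vec = np.diff(cdf)*amp                  # per-bin line flux
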
diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -26,8 +26,11 @@
 import ppv_utils
 from yt.funcs import is_root
 
-def create_vlos(normal):
-    if isinstance(normal, basestring):
+def create_vlos(normal, no_shifting):
+    if no_shifting:
+        def _v_los(field, data):
+            return data.ds.arr(data["zeros"], "cm/s")
+    elif isinstance(normal, basestring):
         def _v_los(field, data): 
             return -data["velocity_%s" % normal]
     else:
@@ -48,7 +51,7 @@
 class PPVCube(object):
     def __init__(self, ds, normal, field, center="c", width=(1.0,"unitary"),
                  dims=(100,100,100), velocity_bounds=None, thermal_broad=False,
-                 atomic_weight=56., method="integrate"):
+                 atomic_weight=56., method="integrate", no_shifting=False):
         r""" Initialize a PPVCube object.
 
         Parameters
@@ -88,6 +91,9 @@
             Set the projection method to be used.
             "integrate" : line of sight integration over the line element.
             "sum" : straight summation over the line of sight.
+        no_shifting : boolean, optional
+            If set, no Doppler shifting due to velocity occurs; only thermal
+            broadening is applied. Requires *thermal_broad* to be True,
+            otherwise a RuntimeError is raised.
 
         Examples
         --------
@@ -102,6 +108,10 @@
         self.width = width
         self.particle_mass = atomic_weight*mh
         self.thermal_broad = thermal_broad
+        self.no_shifting = no_shifting
+
+        if no_shifting and not thermal_broad:
+            raise RuntimeError("no_shifting cannot be True when thermal_broad is False!")
 
         self.center = ds.coordinates.sanitize_center(center, normal)[0]
 
@@ -135,7 +145,7 @@
 
         self.current_v = 0.0
 
-        _vlos = create_vlos(normal)
+        _vlos = create_vlos(normal, self.no_shifting)
         self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")
 
         _intensity = self.create_intensity()
@@ -287,7 +297,7 @@
         w.wcs.cunit = [units,units,vunit]
         w.wcs.ctype = [types[0],types[1],vtype]
 
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib = FITSImageBuffer(self.data.transpose(2,0,1), fields=self.field, wcs=w)
         fib[0].header["bunit"] = re.sub('()', '', str(self.proj_units))
         fib[0].header["btype"] = self.field
 

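With `no_shifting`, `v_los` is identically zero, so the cube keeps thermal
broadening but drops the Doppler shifts, and the constructor rejects the
combination that would leave neither. A usage sketch (import path and
dataset name are illustrative assumptions):

    import yt
    from yt.analysis_modules.ppv_cube.api import PPVCube

    ds = yt.load("MySample/data0001")
    # thermal_broad=True is required here; otherwise __init__ raises
    # RuntimeError, since the cube would carry no spectral information.
    cube = PPVCube(ds, "z", "density", thermal_broad=True, no_shifting=True)
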
diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -317,7 +317,12 @@
         np.add(py, oy, py)
         np.multiply(pdy, self.ds.domain_width[yax], pdy)
         if self.weight_field is not None:
-            np.divide(nvals, nwvals[:,None], nvals)
+            # If any weight values are still 0, the division will not
+            # raise an error but will silently produce NaNs where we
+            # divide by 0. Leave them as NaNs to be auto-masked by
+            # Matplotlib.
+            with np.errstate(invalid='ignore'):
+                np.divide(nvals, nwvals[:,None], nvals)
         # We now convert to half-widths and center-points
         data = {}
         #non_nan = ~np.any(np.isnan(nvals), axis=-1)
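
For context: np.errstate scopes the floating-point error handling, so inside
the block 0/0 quietly yields NaN and Matplotlib later masks the NaNs. A
minimal sketch of the weighted division above:

    import numpy as np

    nvals = np.array([[1.0, 2.0],
                      [0.0, 0.0]])      # nothing accumulated in row 1...
    nwvals = np.array([2.0, 0.0])       # ...and its weight sum is zero

    with np.errstate(invalid='ignore'): # silence the 0/0 "invalid" warning
        np.divide(nvals, nwvals[:, None], nvals)  # in-place divide

    # nvals is now [[0.5, 1.0], [nan, nan]]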

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -560,11 +560,14 @@
         self._data_source = data_source
         if data_source is not None:
             if data_source.ds is not self.ds:
-                raise RuntimeError("Attempted to construct a DataContainer with a data_source from a different DataSet", ds, data_source.ds)
+                raise RuntimeError("Attempted to construct a DataContainer with a data_source "
+                                   "from a different DataSet", ds, data_source.ds)
             if data_source._dimensionality < self._dimensionality:
-                raise RuntimeError("Attempted to construct a DataContainer with a data_source of lower dimensionality (%u vs %u)" %
+                raise RuntimeError("Attempted to construct a DataContainer with a data_source "
+                                   "of lower dimensionality (%u vs %u)" %
                                     (data_source._dimensionality, self._dimensionality))
- 
+            self.field_parameters.update(data_source.field_parameters)
+
     @property
     def selector(self):
         if self._selector is not None: return self._selector
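
The added `field_parameters.update(...)` makes a derived container start
from its source's field parameters. A usage sketch (dataset name is an
illustrative assumption):

    import yt

    ds = yt.load("MySample/data0001")
    sp = ds.sphere("c", (100.0, "kpc"))
    sp.set_field_parameter("bulk_velocity", ds.arr([100.0, 0.0, 0.0], "km/s"))

    # A cut region built from sp now inherits bulk_velocity automatically:
    cr = sp.cut_region(["obj['temperature'] < 1e3"])
    cr.get_field_parameter("bulk_velocity")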

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -265,7 +265,12 @@
             self.fcoords, fields,
             self.domain_id, self._domain_offset, self.ds.periodicity,
             index_fields, particle_octree, pdom_ind, self.ds.geometry)
-        vals = op.finalize()
+        # If there are 0s in the smoothing field, the division will not raise
+        # an error but will silently produce NaNs. The values are the same as
+        # before; we only suppress the divide-by-zero warning.
+        with np.errstate(invalid='ignore'):
+            vals = op.finalize()
         if vals is None: return
         if isinstance(vals, list):
             vals = [np.asfortranarray(v) for v in vals]

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -693,7 +693,7 @@
 
     Parameters
     ----------
-    base_object : YTSelectionContainer3D
+    data_source : YTSelectionContainer3D
         The object to which cuts will be applied.
     conditionals : list of strings
         A list of conditionals that will be evaluated.  In the namespace
@@ -710,13 +710,20 @@
     >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
     """
     _type_name = "cut_region"
-    _con_args = ("base_object", "conditionals")
-    def __init__(self, base_object, conditionals, ds=None,
-                 field_parameters=None, data_source=None):
-        super(YTCutRegionBase, self).__init__(base_object.center, ds,
-                                              field_parameters, data_source)
+    _con_args = ("data_source", "conditionals")
+    def __init__(self, data_source, conditionals, ds=None,
+                 field_parameters=None, base_object=None):
+        if base_object is not None:
+            # passing base_object explicitly has been deprecated,
+            # but we handle it here for backward compatibility
+            if data_source is not None:
+                raise RuntimeError(
+                    "Cannot use both base_object and data_source")
+            data_source=base_object
+        super(YTCutRegionBase, self).__init__(
+            data_source.center, ds, field_parameters, data_source=data_source)
         self.conditionals = ensure_list(conditionals)
-        self.base_object = base_object
+        self.base_object = data_source
         self._selector = None
         # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
         # ires and get_data
@@ -725,8 +732,8 @@
         # We actually want to chunk the sub-chunk, not ourselves.  We have no
         # chunks to speak of, as we do not data IO.
         for chunk in self.index._chunk(self.base_object,
-                                           chunking_style,
-                                           **kwargs):
+                                       chunking_style,
+                                       **kwargs):
             with self.base_object._chunked_read(chunk):
                 with self._chunked_read(chunk):
                     self.get_data(fields)
@@ -755,9 +762,6 @@
             if not np.any(m): continue
             yield obj, m
 
-    def cut_region(self, *args, **kwargs):
-        raise NotImplementedError
-
     @property
     def _cond_ind(self):
         ind = None
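
The constructor change above is the usual keyword-rename shim: `base_object`
keeps working as a deprecated alias of `data_source`, and passing both is an
error. The pattern in isolation (names illustrative):

    def make_region(data_source=None, conditionals=(), base_object=None):
        # base_object is the deprecated spelling of data_source
        if base_object is not None:
            if data_source is not None:
                raise RuntimeError("Cannot use both base_object and data_source")
            data_source = base_object
        return data_source, tuple(conditionals)

    # Old call sites keep working:
    make_region(base_object={"center": "c"},
                conditionals=["obj['temperature'] < 1e3"])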

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -28,9 +28,13 @@
     mpc_conversion, sec_conversion
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 
 def _get_convert(fname):
     def _conv(data):
@@ -39,7 +43,8 @@
 
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index, level, start, dimensions):
+    def __init__(self, id, index, level, start, dimensions,
+                 file_offset, read_dims):
         df = index.dataset.filename[4:-4]
         gname = index.grid_filenames[id]
         AMRGridPatch.__init__(self, id, filename = gname,
@@ -51,6 +56,8 @@
         self.start_index = start.copy()
         self.stop_index = self.start_index + dimensions
         self.ActiveDimensions = dimensions.copy()
+        self.file_offset = file_offset
+        self.read_dims = read_dims
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -172,7 +179,7 @@
         self._field_map = field_map
 
     def _count_grids(self):
-        self.num_grids = self.dataset.nvtk
+        self.num_grids = self.dataset.nvtk*self.dataset.nprocs
 
     def _parse_index(self):
         f = open(self.index_filename,'rb')
@@ -220,7 +227,6 @@
         gridlistread = [fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots]
         self.num_grids = len(gridlistread)
         dxs=[]
-        self.grids = np.empty(self.num_grids, dtype='object')
         levels = np.zeros(self.num_grids, dtype='int32')
         glis = np.empty((self.num_grids,3), dtype='float64')
         gdds = np.empty((self.num_grids,3), dtype='float64')
@@ -292,24 +298,66 @@
             self.dataset.domain_dimensions[2] = np.int(1)
         if self.dataset.dimensionality == 1 :
             self.dataset.domain_dimensions[1] = np.int(1)
-        for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i,self,levels[i],
-                                      glis[i],
-                                      gdims[i])
-            dx = (self.dataset.domain_right_edge-
-                  self.dataset.domain_left_edge)/self.dataset.domain_dimensions
-            dx = dx/self.dataset.refine_by**(levels[i])
-            dxs.append(dx)
 
-        dx = self.ds.arr(dxs, "code_length")
         dle = self.dataset.domain_left_edge
         dre = self.dataset.domain_right_edge
-        self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12), "code_length")
-        self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
-                                                    dx*self.grid_dimensions,
-                                                    decimals=12),
-                                            "code_length")
+        dx_root = (self.dataset.domain_right_edge-
+                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
+
+        if self.dataset.nprocs > 1:
+            gle_all = []
+            gre_all = []
+            shapes_all = []
+            levels_all = []
+            new_gridfilenames = []
+            file_offsets = []
+            read_dims = []
+            for i in range(levels.shape[0]):
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                gle_orig = self.ds.arr(np.round(dle + dx*glis[i], decimals=12),
+                                       "code_length")
+                gre_orig = self.ds.arr(np.round(gle_orig + dx*gdims[i], decimals=12),
+                                       "code_length")
+                bbox = np.array([[le,re] for le, re in zip(gle_orig, gre_orig)])
+                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
+                gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
+                gle_all += gle
+                gre_all += gre
+                shapes_all += shapes
+                levels_all += [levels[i]]*self.dataset.nprocs
+                new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
+                file_offsets += [[slc[0].start, slc[1].start, slc[2].start] for slc in slices]
+                read_dims += [np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int") for shape in shapes]
+            self.num_grids *= self.dataset.nprocs
+            self.grids = np.empty(self.num_grids, dtype='object')
+            self.grid_filenames = new_gridfilenames
+            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
+            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
+            self.grid_dimensions = np.array([shape for shape in shapes_all],
+                                            dtype="int32")
+            gdds = (self.grid_right_edge-self.grid_left_edge)/self.grid_dimensions
+            glis = np.round((self.grid_left_edge - self.ds.domain_left_edge)/gdds).astype('int')
+            for i in range(self.num_grids):
+                self.grids[i] = self.grid(i,self,levels_all[i],
+                                          glis[i], shapes_all[i],
+                                          file_offsets[i], read_dims[i])
+        else:
+            self.grids = np.empty(self.num_grids, dtype='object')
+            for i in range(levels.shape[0]):
+                self.grids[i] = self.grid(i,self,levels[i],
+                                          glis[i], gdims[i], [0]*3,
+                                          gdims[i])
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                dxs.append(dx)
+
+            dx = self.ds.arr(dxs, "code_length")
+            self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12),
+                                              "code_length")
+            self.grid_dimensions = gdims.astype("int32")
+            self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
+                                                        dx*self.grid_dimensions,
+                                                        decimals=12),
+                                               "code_length")
         
         if self.dataset.dimensionality <= 2:
             self.grid_right_edge[:,2] = dre[2]
@@ -347,6 +395,14 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
 class AthenaDataset(Dataset):
     _index_class = AthenaHierarchy
     _field_info_class = AthenaFieldInfo
@@ -354,8 +410,9 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None):
+                 units_override=None, nprocs=1):
         self.fluid_types += ("athena",)
+        self.nprocs = nprocs
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters
@@ -435,6 +492,8 @@
             dimensionality = 1
         if dimensionality <= 2 : self.domain_dimensions[2] = np.int32(1)
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
+        if dimensionality != 3 and self.nprocs > 1:
+            raise RuntimeError("Virtual grids are only supported for 3D outputs!")
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self.parameter_filename.__hash__()
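
The new nprocs keyword splits each grid of a 3D Athena dump into virtual
grids, so yt can chunk and parallelize over pieces of one monolithic grid.
A usage sketch (file name is an illustrative assumption, mirroring the test
further below):

    import yt

    ds = yt.load("virgo_low_res.0054.vtk",
                 units_override={"length_unit": (1.0, "Mpc"),
                                 "time_unit": (1.0, "Myr"),
                                 "mass_unit": (1.0e14, "Msun")},
                 nprocs=8)   # requires a 3D output, else RuntimeError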

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -17,6 +17,9 @@
 import numpy as np
 from yt.funcs import mylog, defaultdict
 
+float_size = np.dtype(">f4").itemsize
+axis_list = ["_x","_y","_z"]
+
 class IOHandlerAthena(BaseIOHandler):
     _dataset_type = "athena"
     _offset_string = 'data:offsets=0'
@@ -33,40 +36,40 @@
 
     def _read_chunk_data(self,chunk,fields):
         data = {}
-        grids_by_file = defaultdict(list)
         if len(chunk.objs) == 0: return data
-        field_list = set(f[1] for f in fields)
         for grid in chunk.objs:
             if grid.filename is None:
                 continue
             f = open(grid.filename, "rb")
             data[grid.id] = {}
-            grid_ncells = np.prod(grid.ActiveDimensions)
             grid_dims = grid.ActiveDimensions
-            grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
+            read_dims = grid.read_dims
+            grid_ncells = np.int(np.prod(read_dims))
+            grid0_ncells = np.int(np.prod(grid.index.grids[0].read_dims))
             read_table_offset = get_read_table_offset(f)
-            for field in self.ds.field_list:
+            for field in fields:
                 dtype, offsetr = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                f.seek(read_table_offset+offset)
+                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size
+                xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
+                yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
+                f.seek(read_table_offset+offset+file_offset)
                 if dtype == 'scalar':
+                    f.seek(read_table_offset+offset+file_offset)
                     v = np.fromfile(f, dtype='>f4',
-                                    count=grid_ncells).reshape(grid_dims,order='F')
+                                    count=grid_ncells).reshape(read_dims,order='F')
                 if dtype == 'vector':
+                    vec_offset = axis_list.index(field[-1][-2:])
+                    f.seek(read_table_offset+offset+3*file_offset)
                     v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
-                if '_x' in field[-1]:
-                    v = v[0::3].reshape(grid_dims,order='F')
-                elif '_y' in field[-1]:
-                    v = v[1::3].reshape(grid_dims,order='F')
-                elif '_z' in field[-1]:
-                    v = v[2::3].reshape(grid_dims,order='F')
+                    v = v[vec_offset::3].reshape(read_dims,order='F')
                 if grid.ds.field_ordering == 1:
-                    data[grid.id][field] = v.T.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].T.astype("float64")
                 else:
-                    data[grid.id][field] = v.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].astype("float64")
             f.close()
         return data
     

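For context: the reader seeks to read_table_offset + offset + file_offset,
pulls big-endian float32 records, and reshapes in Fortran order; vector
fields are stored interleaved (x, y, z) and de-interleaved with a stride-3
slice. The access pattern in isolation (file name and offsets illustrative):

    import numpy as np

    read_dims = (8, 8, 4)                # illustrative grid shape
    ncells = int(np.prod(read_dims))

    with open("grid.vtk", "rb") as f:
        f.seek(1024)                     # some computed byte offset
        v = np.fromfile(f, dtype=">f4", count=3*ncells)   # interleaved vector
        vx = v[0::3].reshape(read_dims, order="F")        # x-component
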
diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -20,6 +20,8 @@
     big_patch_amr, \
     data_dir_load
 from yt.frontends.athena.api import AthenaDataset
+from yt.config import ytcfg
+from yt.convenience import load
 
 _fields_cloud = ("scalar[0]", "density", "total_energy")
 
@@ -58,6 +60,31 @@
         test_stripping.__name__ = test.description
         yield test
 
+sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
+
+uo_sloshing = {"length_unit":(1.0,"Mpc"),
+               "time_unit":(1.0,"Myr"),
+               "mass_unit":(1.0e14,"Msun")}
+
+@requires_file(sloshing)
+def test_nprocs():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds1 = load(sloshing, units_override=uo_sloshing)
+    sp1 = ds1.sphere("c", (100.,"kpc"))
+    prj1 = ds1.proj("density",0)
+    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
+    sp2 = ds2.sphere("c", (100.,"kpc"))
+    prj2 = ds2.proj("density",0)
+
+    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_allclose, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    for ax in "xyz":
+        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
+    yield assert_allclose, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
+    yield assert_equal, prj1["density"], prj2["density"]
+
+    ytcfg["yt","skip_dataset_cache"] = "False"
 
 @requires_file(cloud)
 def test_AthenaDataset():

diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -784,39 +784,33 @@
 region_selector = RegionSelector
 
 cdef class CutRegionSelector(SelectorObject):
-    cdef SelectorObject base_selector
-    cdef dict _positions
+    cdef set _positions
+    cdef tuple _conditionals
 
     def __init__(self, dobj):
-        self.base_selector = <SelectorObject>dobj.base_object.selector
         positions = np.array([dobj['x'], dobj['y'], dobj['z']]).T
-        self._positions = {}
-        for position in positions:
-            self._positions[tuple(position)] = 1
+        self._conditionals = tuple(dobj.conditionals)
+        self._positions = set(tuple(position) for position in positions)
 
     cdef int select_bbox(self,  np.float64_t left_edge[3],
                      np.float64_t right_edge[3]) nogil:
-        return self.base_selector.select_bbox(left_edge, right_edge)
+        return 1
 
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
-        ret = self.base_selector.select_cell(pos, dds)
-        if ret:
-            with gil:
-                if (pos[0], pos[1], pos[2]) in self._positions:
-                    return 1
-                else:
-                    return 0
-        else:
-            return ret
+        with gil:
+            if (pos[0], pos[1], pos[2]) in self._positions:
+                return 1
+            else:
+                return 0
 
     cdef int select_point(self, np.float64_t pos[3]) nogil:
-        return self.base_selector.select_point(pos)
+        return 1
 
     cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
-        return self.base_selector.select_sphere(pos, radius)
+        return 1
 
     def _hash_vals(self):
-        return self.base_selector._hash_vals()
+        return self._conditionals
 
 cut_region_selector = CutRegionSelector
 

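The selector now keeps the cut region's cell positions in a plain set of
(x, y, z) tuples, so select_cell is a single O(1) membership test; exact
float equality is safe because the tuples come from the same arrays. The
core idea in plain Python:

    import numpy as np

    x = np.array([0.1, 0.2, 0.3])
    y = np.array([0.5, 0.6, 0.7])
    z = np.array([0.9, 1.0, 1.1])
    positions = set(tuple(p) for p in np.array([x, y, z]).T)

    def select_cell(pos):
        return 1 if tuple(pos) in positions else 0

    select_cell((0.2, 0.6, 1.0))   # -> 1
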
diff -r d698872ff2e73147bae8b0a1b3812e8d4c8a6edd -r 3750b2bf4a688b002055dae5bfb1254cb2549504 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -204,14 +204,14 @@
     yield assert_true, u1.cgs_value == 42
     yield assert_true,  u1.dimensions == length**2*mass
 
-    yield assert_raises, UnitParseError, Unit, 'abc', \
-        {'cgs_value':42, 'dimensions':length**length}
-    yield assert_raises, UnitParseError, Unit, 'abc', \
-        {'cgs_value':42, 'dimensions':length**(length*length)}
-    yield assert_raises, UnitParseError, Unit, 'abc', \
-        {'cgs_value':42, 'dimensions':length-mass}
-    yield assert_raises, UnitParseError, Unit, 'abc', \
-        {'cgs_value':42, 'dimensions':length+mass}
+    assert_raises(UnitParseError, Unit, 'abc', cgs_value=42,
+                  dimensions=length**length)
+    assert_raises(UnitParseError, Unit, 'abc', cgs_value=42,
+                  dimensions=length**(length*length))
+    assert_raises(UnitParseError, Unit, 'abc', cgs_value=42,
+                  dimensions=length-mass)
+    assert_raises(UnitParseError, Unit, 'abc', cgs_value=42,
+                  dimensions=length+mass)
 
 def test_create_fail_on_unknown_symbol():
     """

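The yielded form above passed the keywords as a literal dict, which nose
handed to Unit as one positional argument; calling assert_raises directly
lets real keyword arguments through. A minimal contrast:

    from numpy.testing import assert_raises

    # yield assert_raises, ValueError, int, 'abc', {'base': 2}  # dict is positional
    assert_raises(ValueError, int, 'abc', base=2)               # keyword as intended
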
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/8d144ae1a0e7/
Changeset:   8d144ae1a0e7
Branch:      yt
User:        xarthisius
Date:        2014-12-02 20:14:10+00:00
Summary:     Merged in hegan/yt (pull request #1311)

[bugfix] First pass at correcting periodic rvec calculation
Affected #:  1 file

diff -r dc2ade03350286b7e54436ac100da00d086c24ae -r 8d144ae1a0e76369339f08b4633a6ef7eb91e7fc yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -55,8 +55,19 @@
     for i in range(coords.shape[0]):
         if not data.ds.periodicity[i]: continue
         coords[i, ...] -= le[i]
-        coords[i, ...] = np.min([np.abs(np.mod(coords[i, ...],  dw[i])),
-                                 np.abs(np.mod(coords[i, ...], -dw[i]))],
-                                 axis=0)
-        coords[i, ...] += le[i]
+        # Figure out which measure is smaller
+        mins = np.argmin([np.abs(np.mod(coords[i, ...],  dw[i])),
+                         np.abs(np.mod(coords[i, ...], -dw[i]))],
+                         axis=0)
+        temp_coords = np.mod(coords[i, ...], dw[i])
+
+        # Where the second measure is better, update the temporary coords
+        ii = mins==1
+        temp_coords[ii] = np.mod(coords[i, ...], -dw[i])[ii]
+
+        # Put the temporary coords into the actual storage
+        coords[i, ...] = temp_coords
+
+        coords[i, ...] += le[i]
+
     return coords
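
Per axis, the fix picks whichever of mod(x, dw) and mod(x, -dw) is smaller
in magnitude (the minimum-image displacement) before adding the left edge
back. A standalone numpy sketch with an illustrative width:

    import numpy as np

    dw = 1.0                                 # domain width (illustrative)
    x = np.array([0.1, 0.55, 0.9])           # coords relative to left edge

    a = np.mod(x,  dw)                       # wrapped forward
    b = np.mod(x, -dw)                       # wrapped backward
    mins = np.argmin([np.abs(a), np.abs(b)], axis=0)

    r = a.copy()
    r[mins == 1] = b[mins == 1]              # keep the smaller-magnitude one
    # r == [0.1, -0.45, -0.1]: minimum-image displacement per point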

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


