[yt-svn] commit/yt: 50 new changesets

commits-noreply@bitbucket.org
Thu Nov 20 18:00:49 PST 2014


50 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/cd6b62f135ec/
Changeset:   cd6b62f135ec
Branch:      yt
User:        jzuhone
Date:        2014-11-06 02:02:57+00:00
Summary:     Adding the hour angle unit
Affected #:  1 file

diff -r 52a7220cf325116928457893a2ee1810407331e0 -r cd6b62f135ec2dc50abff1ea1c056777237ac71e yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -94,6 +94,7 @@
     "arcmin": (np.pi/10800., dimensions.angle), # arcminutes
     "arcsec": (np.pi/648000., dimensions.angle), # arcseconds
     "mas": (np.pi/648000000., dimensions.angle), # millarcseconds
+    "hourangle": (np.pi/12., dimensions.angle), # hour angle
     "steradian": (1.0, dimensions.solid_angle),
 
     # misc

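A quick check of the new unit (a minimal sketch, assuming this changeset is applied; the quantity is illustrative):

    from yt.units.yt_array import YTQuantity

    ra = YTQuantity(180.0, "degree")
    # 24 hourangle cover the full circle, so 180 degrees should come out as 12 hourangle
    print ra.in_units("hourangle")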

https://bitbucket.org/yt_analysis/yt/commits/2b236cea2181/
Changeset:   2b236cea2181
Branch:      yt
User:        jzuhone
Date:        2014-11-06 15:46:04+00:00
Summary:     Merge
Affected #:  3 files

diff -r cd6b62f135ec2dc50abff1ea1c056777237ac71e -r 2b236cea218110278416e4aa8468bb5d057316f5 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -761,6 +761,7 @@
         self.field_data = YTFieldData()
         if weight_field is not None:
             self.variance = YTFieldData()
+            weight_field = self.data_source._determine_fields(weight_field)[0]
         self.weight_field = weight_field
         self.field_units = {}
         ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
@@ -774,7 +775,7 @@
             A list of fields to create profile histograms for
         
         """
-        fields = ensure_list(fields)
+        fields = self.data_source._determine_fields(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
         cfields = fields + list(self.bin_fields)
         citer = self.data_source.chunks(cfields, "io")
@@ -907,9 +908,11 @@
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            arr[:,i] = chunk[field][filter]
+            units = chunk.ds.field_info[field].units
+            arr[:,i] = chunk[field][filter].in_units(units)
         if self.weight_field is not None:
-            weight_data = chunk[self.weight_field]
+            units = chunk.ds.field_info[self.weight_field].units
+            weight_data = chunk[self.weight_field].in_units(units)
         else:
             weight_data = np.ones(filter.size, dtype="float64")
         weight_data = weight_data[filter]
@@ -1230,6 +1233,16 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
+
+def sanitize_field_tuple_keys(input_dict, data_source):
+    if input_dict is not None:
+        dummy = {}
+        for item in input_dict:
+            dummy[data_source._determine_fields(item)[0]] = input_dict[item]
+        return dummy
+    else:
+        return input_dict
+
 def create_profile(data_source, bin_fields, fields, n_bins=64,
                    extrema=None, logs=None, units=None,
                    weight_field="cell_mass",
@@ -1293,7 +1306,7 @@
     >>> print profile["gas", "temperature"]
 
     """
-    bin_fields = ensure_list(bin_fields)
+    bin_fields = data_source._determine_fields(bin_fields)
     fields = ensure_list(fields)
     if len(bin_fields) == 1:
         cls = Profile1D
@@ -1305,16 +1318,9 @@
         raise NotImplementedError
     bin_fields = data_source._determine_fields(bin_fields)
     fields = data_source._determine_fields(fields)
-    if units is not None:
-        dummy = {}
-        for item in units:
-            dummy[data_source._determine_fields(item)[0]] = units[item]
-        units.update(dummy)
-    if extrema is not None:
-        dummy = {}
-        for item in extrema:
-            dummy[data_source._determine_fields(item)[0]] = extrema[item]
-        extrema.update(dummy)
+    units = sanitize_field_tuple_keys(units, data_source)
+    extrema = sanitize_field_tuple_keys(extrema, data_source)
+    logs = sanitize_field_tuple_keys(logs, data_source)
     if weight_field is not None:
         weight_field, = data_source._determine_fields([weight_field])
     if not iterable(n_bins):
@@ -1322,18 +1328,21 @@
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
     if logs is None:
-        logs = [data_source.ds._get_field_info(f[0],f[1]).take_log
-                for f in bin_fields]
-    else:
-        logs = [logs[bin_field[-1]] for bin_field in bin_fields]
+        logs = {}
+    logs_list = []
+    for bin_field in bin_fields:
+        if bin_field in logs:
+            logs_list.append(logs[bin_field])
+        else:
+            logs_list.append(data_source.ds.field_info[bin_field].take_log)
+    logs = logs_list
     if extrema is None:
         ex = [data_source.quantities["Extrema"](f, non_zero=l)
               for f, l in zip(bin_fields, logs)]
     else:
         ex = []
         for bin_field in bin_fields:
-            bf_units = data_source.ds._get_field_info(
-                bin_field[0], bin_field[1]).units
+            bf_units = data_source.ds.field_info[bin_field].units
             try:
                 field_ex = list(extrema[bin_field[-1]])
             except KeyError:

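With sanitize_field_tuple_keys in place, the units, extrema, and logs dictionaries passed to create_profile may be keyed either by plain field names or by (field type, field name) tuples; both are routed through _determine_fields. A minimal sketch, assuming a loaded dataset (the dataset path and exact field names are illustrative):

    import yt
    from yt.data_objects.profiles import create_profile

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
    ad = ds.all_data()
    prof = create_profile(ad, "density", ["temperature"],
                          extrema={("gas", "density"): (1e-30, 1e-24)},
                          logs={("gas", "density"): True},
                          units={("gas", "density"): "g/cm**3"},
                          weight_field="cell_mass")
    print prof["gas", "temperature"]
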
diff -r cd6b62f135ec2dc50abff1ea1c056777237ac71e -r 2b236cea218110278416e4aa8468bb5d057316f5 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,7 +1,7 @@
 from yt.testing import *
 from yt.data_objects.profiles import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    Profile1D, Profile2D, Profile3D
+    Profile1D, Profile2D, Profile3D, create_profile
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
@@ -87,13 +87,26 @@
     for nb in [8, 16, 32, 64]:
         # We log all the fields or don't log 'em all.  No need to do them
         # individually.
-        for lf in [True, False]: 
-            p1d = Profile1D(dd, 
-                "density",     nb, rmi*e1, rma*e2, lf,
-                weight_field = None)
-            p1d.add_fields(["ones", "temperature"])
-            yield assert_equal, p1d["ones"].sum(), nv
-            yield assert_rel_equal, tt, p1d["temperature"].sum(), 7
+        for lf in [True, False]:
+            direct_profile = Profile1D(
+                dd, "density", nb, rmi*e1, rma*e2, lf, weight_field = None)
+            direct_profile.add_fields(["ones", "temperature"])
+
+            indirect_profile_s = create_profile(
+                dd, "density", ["ones", "temperature"], n_bins=nb,
+                extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf}, 
+                weight_field=None)
+
+            indirect_profile_t = create_profile(
+                dd, ("gas", "density"),
+                [("index", "ones"), ("gas", "temperature")], n_bins=nb,
+                extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf}, 
+                weight_field=None)
+
+            for p1d in [direct_profile, indirect_profile_s,
+                        indirect_profile_t]:
+                yield assert_equal, p1d["index", "ones"].sum(), nv
+                yield assert_rel_equal, tt, p1d["gas", "temperature"].sum(), 7
 
             p2d = Profile2D(dd, 
                 "density",     nb, rmi*e1, rma*e2, lf,
@@ -154,6 +167,12 @@
         p3d.add_fields(["ones"])
         yield assert_equal, p3d["ones"], np.ones((nb,nb,nb))
 
+extrema_s = {'particle_position_x': (0, 1)}
+logs_s = {'particle_position_x': False}
+
+extrema_t = {('all', 'particle_position_x'): (0, 1)}
+logs_t = {('all', 'particle_position_x'): False}
+
 def test_particle_profiles():
     for nproc in [1, 2, 4, 8]:
         ds = fake_random_ds(32, nprocs=nproc, particles = 32**3)
@@ -164,6 +183,18 @@
         p1d.add_fields(["particle_ones"])
         yield assert_equal, p1d["particle_ones"].sum(), 32**3
 
+        p1d = create_profile(dd, ["particle_position_x"], ["particle_ones"],
+                             weight_field=None, n_bins=128, extrema=extrema_s,
+                             logs=logs_s)
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+        p1d = create_profile(dd,
+                             [("all", "particle_position_x")],
+                             [("all", "particle_ones")],
+                             weight_field=None, n_bins=128, extrema=extrema_t,
+                             logs=logs_t)
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
         p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                         weight_field = None)

diff -r cd6b62f135ec2dc50abff1ea1c056777237ac71e -r 2b236cea218110278416e4aa8468bb5d057316f5 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -21,6 +21,7 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.selection_routines import AlwaysSelector
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def particle_sequences(grids):
@@ -132,7 +133,19 @@
         rv = {}
         for g in chunk.objs:
             rv[g.id] = {}
-        for field in fields:
+        # Split into particles and non-particles
+        fluid_fields, particle_fields = [], []
+        for ftype, fname in fields:
+            if ftype in self.ds.particle_types:
+                particle_fields.append((ftype, fname))
+            else:
+                fluid_fields.append((ftype, fname))
+        if len(particle_fields) > 0:
+            selector = AlwaysSelector(self.ds)
+            rv.update(self._read_particle_selection(
+                [chunk], selector, particle_fields))
+        if len(fluid_fields) == 0: return rv
+        for field in fluid_fields:
             ftype, fname = field
             ds = f["/%s" % fname]
             ind = 0


https://bitbucket.org/yt_analysis/yt/commits/b7242ded8434/
Changeset:   b7242ded8434
Branch:      yt
User:        jzuhone
Date:        2014-11-06 15:46:48+00:00
Summary:     Merge
Affected #:  6 files

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -97,6 +97,8 @@
         # Now, let's figure out what fields are included.
         if any(f[1] == "xmom" for f in self.field_list):
             self.setup_momentum_to_velocity()
+        elif any(f[1] == "xvel" for f in self.field_list):
+            self.setup_velocity_to_momentum()
         self.add_field(("gas", "thermal_energy"),
                        function=_thermal_energy,
                        units="erg/g")
@@ -112,11 +114,22 @@
         def _get_vel(axis):
             def velocity(field, data):
                 return data["%smom" % axis]/data["density"]
+            return velocity
         for ax in 'xyz':
             self.add_field(("gas", "velocity_%s" % ax),
                            function=_get_vel(ax),
                            units="cm/s")
 
+    def setup_velocity_to_momentum(self):
+        def _get_mom(axis):
+            def momentum(field, data):
+                return data["%svel" % axis]*data["density"]
+            return momentum
+        for ax in 'xyz':
+            self.add_field(("gas", "momentum_%s" % ax),
+                           function=_get_mom(ax),
+                           units=mom_units)
+
 
 class CastroFieldInfo(FieldInfoContainer):
 

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -43,6 +43,14 @@
         test_radtube.__name__ = test.description
         yield test
 
+star = "StarParticles/plrd01000"
+@requires_ds(star)
+def test_star():
+    ds = data_dir_load(star)
+    yield assert_equal, str(ds), "plrd01000"
+    for test in small_patch_amr(star, _fields):
+        test_star.__name__ = test.description
+        yield test
 
 @requires_file(rt)
 def test_OrionDataset():

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -47,6 +47,15 @@
         test_tb.__name__ = test.description
         yield test
 
+iso = "IsothermalSphere/data.0000.3d.hdf5"
+@requires_ds(iso)
+def test_iso():
+    ds = data_dir_load(iso)
+    yield assert_equal, str(ds), "data.0000.3d.hdf5"
+    for test in small_patch_amr(iso, _fields):
+        test_iso.__name__ = test.description
+        yield test
+
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -427,6 +427,19 @@
             if key not in keys:
                 del self.frb[key]
 
+    def _set_font_properties(self):
+        for f in self.plots:
+            ax = self.plots[f].axes
+            cbax = self.plots[f].cb.ax
+            labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
+            labels += cbax.yaxis.get_ticklabels()
+            labels += [ax.title, ax.xaxis.label, ax.yaxis.label,
+                       cbax.yaxis.label]
+            for label in labels:
+                label.set_fontproperties(self._font_properties)
+                if self._font_color is not None:
+                    label.set_color(self._font_color)
+
     @invalidate_plot
     @invalidate_figure
     def set_font(self, font_dict=None):

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -744,7 +744,7 @@
                         mylog.warning("Switching to linear colorbar scaling.")
                         self._field_transform[f] = linear_transform
 
-            fp = self._font_properties
+            font_size = self._font_properties.get_size()
 
             fig = None
             axes = None
@@ -763,7 +763,7 @@
                 image, self._field_transform[f].name,
                 self._field_transform[f].func,
                 self._colormaps[f], extent, zlim,
-                self.figure_size, fp.get_size(),
+                self.figure_size, font_size,
                 self.aspect, fig, axes, cax)
 
             axes_unit_labels = ['', '']
@@ -846,14 +846,8 @@
             if y_label is not None:
                 labels[1] = y_label
 
-            self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
-            self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
-
-            for label in (self.plots[f].axes.get_xticklabels() +
-                          self.plots[f].axes.get_yticklabels() +
-                          [self.plots[f].axes.xaxis.get_offset_text(),
-                           self.plots[f].axes.yaxis.get_offset_text()]):
-                label.set_fontproperties(fp)
+            self.plots[f].axes.set_xlabel(labels[0])
+            self.plots[f].axes.set_ylabel(labels[1])
 
             # Determine the units of the data
             units = Unit(self.frb[f].units, registry=self.ds.unit_registry)
@@ -874,13 +868,7 @@
             except ParseFatalException, err:
                 raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err))
 
-            self.plots[f].cb.set_label(colorbar_label, fontproperties=fp)
-
-            for label in (self.plots[f].cb.ax.get_xticklabels() +
-                          self.plots[f].cb.ax.get_yticklabels() +
-                          [self.plots[f].cb.ax.axes.xaxis.get_offset_text(),
-                           self.plots[f].cb.ax.axes.yaxis.get_offset_text()]):
-                label.set_fontproperties(fp)
+            self.plots[f].cb.set_label(colorbar_label)
 
             # x-y axes minorticks
             if f not in self._minorticks:
@@ -916,14 +904,7 @@
             if draw_colorbar is False:
                 self.plots[f]._toggle_colorbar(draw_colorbar)
 
-            if self._font_color is not None:
-                ax = self.plots[f].axes
-                cbax = self.plots[f].cb.ax
-                labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
-                labels += cbax.yaxis.get_ticklabels()
-                labels += [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
-                for label in labels:
-                    label.set_color(self._font_color)
+        self._set_font_properties()
 
         self._plot_valid = True
 

diff -r 2b236cea218110278416e4aa8468bb5d057316f5 -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -150,6 +150,15 @@
         A dictionary or list of dictionaries containing plot keyword 
         arguments.  For example, dict(color="red", linestyle=":").
         Default: None.
+    x_log : bool
+        If not None, whether the x_axis should be plotted with a logarithmic
+        scaling.
+        Default: None
+    y_log : dict
+        A dictionary containing field:boolean pairs, setting the logarithmic
+        property for that field. May be overridden after instantiation using 
+        set_log.
+        Default: None
 
     Examples
     --------
@@ -189,7 +198,6 @@
     """
     x_log = None
     y_log = None
-    z_log = None
     x_title = None
     y_title = None
     _plot_valid = False
@@ -197,21 +205,28 @@
     def __init__(self, data_source, x_field, y_fields,
                  weight_field="cell_mass", n_bins=64,
                  accumulation=False, fractional=False,
-                 label=None, plot_spec=None):
+                 label=None, plot_spec=None,
+                 x_log=None, y_log=None):
+
+        if x_log is None:
+            logs = None
+        else:
+            logs = {x_field:x_log}
 
         profiles = [create_profile(data_source, [x_field],
                                    n_bins=[n_bins],
                                    fields=ensure_list(y_fields),
                                    weight_field=weight_field,
                                    accumulation=accumulation,
-                                   fractional=fractional)]
+                                   fractional=fractional,
+                                   logs=logs)]
 
         if plot_spec is None:
             plot_spec = [dict() for p in profiles]
         if not isinstance(plot_spec, list):
             plot_spec = [plot_spec.copy() for p in profiles]
 
-        ProfilePlot._initialize_instance(self, profiles, label, plot_spec)
+        ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)
 
     def save(self, name=None):
         r"""
@@ -323,11 +338,15 @@
         self._plot_valid = True
 
     @classmethod
-    def _initialize_instance(cls, obj, profiles, labels, plot_specs):
+    def _initialize_instance(cls, obj, profiles, labels, plot_specs, y_log):
+        obj.profiles = ensure_list(profiles)
+        obj.x_log = None
         obj.y_log = {}
+        if y_log is not None:
+            for field, log in y_log.items():
+                field, = obj.profiles[0].data_source._determine_fields([field])
+                obj.y_log[field] = log
         obj.y_title = {}
-        obj.x_log = None
-        obj.profiles = ensure_list(profiles)
         obj.label = sanitize_label(labels, len(obj.profiles))
         if plot_specs is None:
             plot_specs = [dict() for p in obj.profiles]
@@ -338,7 +357,7 @@
         return obj
 
     @classmethod
-    def from_profiles(cls, profiles, labels=None, plot_specs=None):
+    def from_profiles(cls, profiles, labels=None, plot_specs=None, y_log=None):
         r"""
         Instantiate a ProfilePlot object from a list of profiles
         created with :func:`~yt.data_objects.profiles.create_profile`.
@@ -384,7 +403,7 @@
         if plot_specs is not None and len(plot_specs) != len(profiles):
             raise RuntimeError("Profiles list and plot_specs list must be the same size.")
         obj = cls.__new__(cls)
-        return cls._initialize_instance(obj, profiles, labels, plot_specs)
+        return cls._initialize_instance(obj, profiles, labels, plot_specs, y_log)
 
     @invalidate_plot
     def set_line_property(self, property, value, index=None):
@@ -814,21 +833,21 @@
                     self._field_transform[f] = linear_transform
                 zlim = [zmin, np.nanmax(data)]
 
-            fp = self._font_properties
+            font_size = self._font_properties.get_size()
             f = self.profile.data_source._determine_fields(f)[0]
 
             self.plots[f] = PhasePlotMPL(self.profile.x, self.profile.y, data,
                                          x_scale, y_scale, z_scale,
                                          self._colormaps[f], zlim,
-                                         self.figure_size, fp.get_size(),
+                                         self.figure_size, font_size,
                                          fig, axes, cax)
 
             self.plots[f]._toggle_axes(draw_axes)
             self.plots[f]._toggle_colorbar(draw_colorbar)
 
-            self.plots[f].axes.xaxis.set_label_text(x_title, fontproperties=fp)
-            self.plots[f].axes.yaxis.set_label_text(y_title, fontproperties=fp)
-            self.plots[f].cax.yaxis.set_label_text(z_title, fontproperties=fp)
+            self.plots[f].axes.xaxis.set_label_text(x_title)
+            self.plots[f].axes.yaxis.set_label_text(y_title)
+            self.plots[f].cax.yaxis.set_label_text(z_title)
 
             if f in self._plot_text:
                 self.plots[f].axes.text(self._text_xpos[f], self._text_ypos[f],
@@ -839,16 +858,6 @@
             if f in self.plot_title:
                 self.plots[f].axes.set_title(self.plot_title[f])
 
-            ax = self.plots[f].axes
-            cbax = self.plots[f].cb.ax
-            labels = ((ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() +
-                       cbax.yaxis.get_ticklabels()) +
-                      [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label])
-            for label in labels:
-                label.set_fontproperties(fp)
-                if self._font_color is not None:
-                    label.set_color(self._font_color)
-
             # x-y axes minorticks
             if f not in self._minorticks:
                 self._minorticks[f] = True
@@ -871,6 +880,8 @@
             else:
                 self.plots[f].cax.minorticks_off()
 
+        self._set_font_properties()
+
         self._plot_valid = True
 
     @classmethod
@@ -1012,8 +1023,8 @@
         >>> plot.set_title("cell_mass", "This is a phase plot")
         
         """
-
-        self.plot_title[field] = title
+        self.plot_title[self.data_source._determine_fields(field)[0]] = title
+        return self
 
     @invalidate_plot
     def reset_plot(self):

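The new x_log and y_log keywords let the axis scalings be chosen at construction time rather than with set_log afterwards. A minimal sketch, assuming a loaded dataset (the dataset path and field names are illustrative):

    import yt
    from yt.visualization.profile_plotter import ProfilePlot

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
    ad = ds.all_data()
    # x_log applies to the bin field; y_log maps field -> bool
    plot = ProfilePlot(ad, "density", ["temperature"],
                       x_log=True, y_log={"temperature": False},
                       weight_field="cell_mass")
    plot.save()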

https://bitbucket.org/yt_analysis/yt/commits/282687f8a642/
Changeset:   282687f8a642
Branch:      yt
User:        jzuhone
Date:        2014-11-06 16:58:47+00:00
Summary:     Merge
Affected #:  3 files

diff -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e -r 282687f8a6425319429593260eff69a5170276c9 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:cee652d703dd3369d81ebc670882d3734f73d0274aab98823a784d8039355480"
+  "signature": "sha256:b62d83c168828afa81bcf0603bb37d3183f2a810258f25963254ffb24a0acd82"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -159,6 +159,24 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "You can check if a `YTArray` has a given equivalence with `has_equivalent`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print mp.has_equivalent(\"compton\")\n",
+      "print mp.has_equivalent(\"thermal\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
      ]
     },

diff -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e -r 282687f8a6425319429593260eff69a5170276c9 yt/units/equivalencies.py
--- a/yt/units/equivalencies.py
+++ b/yt/units/equivalencies.py
@@ -11,7 +11,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt.utilities.physical_constants as pc
 from yt.units.dimensions import temperature, mass, energy, length, rate, \
     velocity, dimensionless, density, number_density, flux
 from yt.extern.six import add_metaclass
@@ -29,15 +28,19 @@
 class Equivalence(object):
     _skip_add = False
 
+    def __init__(self):
+        import yt.utilities.physical_constants as pc
+        self.pc = pc
+
 class NumberDensityEquivalence(Equivalence):
     _type_name = "number_density"
     dims = (density,number_density,)
 
     def convert(self, x, new_dims, mu=0.6):
         if new_dims == number_density:
-            return x/(mu*pc.mh)
+            return x/(mu*self.pc.mh)
         elif new_dims == density:
-            return x*mu*pc.mh
+            return x*mu*self.pc.mh
 
     def __str__(self):
         return "number density: density <-> number density"
@@ -48,9 +51,9 @@
 
     def convert(self, x, new_dims):
         if new_dims == energy:
-            return pc.kboltz*x
+            return self.pc.kboltz*x
         elif new_dims == temperature:
-            return x/pc.kboltz
+            return x/self.pc.kboltz
 
     def __str__(self):
         return "thermal: temperature <-> energy"
@@ -61,9 +64,9 @@
 
     def convert(self, x, new_dims):
         if new_dims == energy:
-            return x*pc.clight*pc.clight
+            return x*self.pc.clight*self.pc.clight
         elif new_dims == mass:
-            return x/(pc.clight*pc.clight)
+            return x/(self.pc.clight*self.pc.clight)
 
     def __str__(self):
         return "mass_energy: mass <-> energy"
@@ -75,20 +78,20 @@
     def convert(self, x, new_dims):
         if new_dims == energy:
             if x.units.dimensions == length:
-                nu = pc.clight/x
+                nu = self.pc.clight/x
             elif x.units.dimensions == rate:
                 nu = x
-            return pc.hcgs*nu
+            return self.pc.hcgs*nu
         elif new_dims == length:
             if x.units.dimensions == rate:
-                return pc.clight/x
+                return self.pc.clight/x
             elif x.units.dimensions == energy:
-                return pc.hcgs*pc.clight/x
+                return self.pc.hcgs*self.pc.clight/x
         elif new_dims == rate:
             if x.units.dimensions == length:
-                return pc.clight/x
+                return self.pc.clight/x
             elif x.units.dimensions == energy:
-                return x/pc.hcgs
+                return x/self.pc.hcgs
 
     def __str__(self):
         return "spectral: length <-> rate <-> energy"
@@ -100,14 +103,14 @@
     def convert(self, x, new_dims, mu=0.6, gamma=5./3.):
         if new_dims == velocity:
             if x.units.dimensions == temperature:
-                kT = pc.kboltz*x
+                kT = self.pc.kboltz*x
             elif x.units.dimensions == energy:
                 kT = x
-            return np.sqrt(gamma*kT/(mu*pc.mh))
+            return np.sqrt(gamma*kT/(mu*self.pc.mh))
         else:
-            kT = x*x*mu*pc.mh/gamma
+            kT = x*x*mu*self.pc.mh/gamma
             if new_dims == temperature:
-                return kT/pc.kboltz
+                return kT/self.pc.kboltz
             else:
                 return kT
 
@@ -120,10 +123,10 @@
 
     def convert(self, x, new_dims):
         if new_dims == dimensionless:
-            beta = x.in_cgs()/pc.clight
+            beta = x.in_cgs()/self.pc.clight
             return 1./np.sqrt(1.-beta**2)
         elif new_dims == velocity:
-            return pc.clight*np.sqrt(1.-1./(x*x))
+            return self.pc.clight*np.sqrt(1.-1./(x*x))
 
     def __str__(self):
         return "lorentz: velocity <-> dimensionless"
@@ -134,9 +137,9 @@
 
     def convert(self, x, new_dims):
         if new_dims == length:
-            return 2.*pc.G*x/(pc.clight*pc.clight)
+            return 2.*self.pc.G*x/(self.pc.clight*self.pc.clight)
         elif new_dims == mass:
-            return 0.5*x*pc.clight*pc.clight/pc.G
+            return 0.5*x*self.pc.clight*self.pc.clight/self.pc.G
 
     def __str__(self):
         return "schwarzschild: mass <-> length"
@@ -146,7 +149,7 @@
     dims = (mass,length,)
 
     def convert(self, x, new_dims):
-        return pc.hcgs/(x*pc.clight)
+        return self.pc.hcgs/(x*self.pc.clight)
 
     def __str__(self):
         return "compton: mass <-> length"
@@ -157,9 +160,9 @@
 
     def convert(self, x, new_dims):
         if new_dims == flux:
-            return pc.stefan_boltzmann_constant_cgs*x**4
+            return self.pc.stefan_boltzmann_constant_cgs*x**4
         elif new_dims == temperature:
-            return (x/pc.stefan_boltzmann_constant_cgs)**0.25
+            return (x/self.pc.stefan_boltzmann_constant_cgs)**0.25
 
     def __str__(self):
         return "effective_temperature: flux <-> temperature"

diff -r b7242ded8434755fe39ed8eb0f220283e7e6ab8e -r 282687f8a6425319429593260eff69a5170276c9 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -39,6 +39,7 @@
 from yt.utilities.on_demand_imports import _astropy
 from sympy import Rational
 from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
+from yt.units.equivalencies import equivalence_registry
 
 NULL_UNIT = Unit()
 
@@ -479,12 +480,10 @@
         >>> a = yt.YTArray(1.0e7,"K")
         >>> a.to_equivalent("keV", "thermal")
         """
-        from equivalencies import equivalence_registry
-        this_equiv = equivalence_registry[equiv]()
-        old_dims = self.units.dimensions
-        new_dims = YTQuantity(1.0, unit, registry=self.units.registry).units.dimensions
-        if old_dims in this_equiv.dims and new_dims in this_equiv.dims:
-            return this_equiv.convert(self, new_dims, **kwargs).in_units(unit)
+        unit_quan = YTQuantity(1.0, unit, registry=self.units.registry)
+        if self.has_equivalent(equiv) and unit_quan.has_equivalent(equiv):
+            this_equiv = equivalence_registry[equiv]()
+            return this_equiv.convert(self, unit_quan.units.dimensions, **kwargs).in_units(unit)
         else:
             raise YTInvalidUnitEquivalence(equiv, self.units, unit)
 
@@ -493,11 +492,22 @@
         Lists the possible equivalencies associated with this YTArray or
         YTQuantity.
         """
-        from equivalencies import equivalence_registry
         for k,v in equivalence_registry.items():
-            if self.units.dimensions in v.dims:
+            if self.has_equivalent(k):
                 print v()
 
+    def has_equivalent(self, equiv):
+        """
+        Check to see if this YTArray or YTQuantity has an equivalent unit in
+        *equiv*.
+        """
+        try:
+            this_equiv = equivalence_registry[equiv]()
+        except KeyError:
+            raise KeyError("No such equivalence \"%s\"." % equiv)
+        old_dims = self.units.dimensions
+        return old_dims in this_equiv.dims
+
     def ndarray_view(self):
         """
         Returns a view into the array, but as an ndarray rather than ytarray.

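Together with the notebook cell added above, the new equivalence helpers can be exercised as follows (a minimal sketch, assuming this changeset is installed):

    from yt.units.yt_array import YTQuantity

    a = YTQuantity(1.0e7, "K")
    print a.has_equivalent("thermal")        # True: temperature <-> energy
    print a.to_equivalent("keV", "thermal")  # kT for a 1e7 K plasma
    a.list_equivalencies()                   # prints all equivalencies matching these dimensions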

https://bitbucket.org/yt_analysis/yt/commits/49a0dc20e661/
Changeset:   49a0dc20e661
Branch:      yt
User:        jzuhone
Date:        2014-11-07 20:02:55+00:00
Summary:     Merge
Affected #:  95 files

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -3,14 +3,19 @@
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.absorption_spectrum.api import generate_total_fit
 
-# Define and add a field to simulate OVI based on a constant relationship to HI
+# Define a field to simulate OVI based on a constant relationship to HI
 # Do *NOT* use this for science, because this is not how OVI actually behaves;
 # it is just an example.
 
- at yt.derived_field(name='O_p5_number_density', units='cm**-3')
-@yt.derived_field(name='O_p5_number_density', units='cm**-3')
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
+# Define a function that will accept a ds and add the new field 
+# defined above.  This will be given to the LightRay below.
+def setup_ds(ds):
+    ds.add_field("O_p5_number_density", 
+                 function=_OVI_number_density,
+                 units="cm**-3")
 
 # Define species and associated parameters to add to continuum
 # Parameters used for both adding the transition to the spectrum
@@ -65,7 +70,7 @@
 lr.make_light_ray(seed=123456780,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
-                  fields=fields,
+                  fields=fields, setup_function=setup_ds,
                   get_los_velocity=True,
                   njobs=-1)
 

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -136,9 +136,9 @@
     hexahedral_connectivity
 
 # For backwards compatibility
-GadgetDataset = frontends.sph.GadgetDataset
+GadgetDataset = frontends.gadget.GadgetDataset
 GadgetStaticOutput = deprecated_class(GadgetDataset)
-TipsyDataset = frontends.sph.TipsyDataset
+TipsyDataset = frontends.tipsy.TipsyDataset
 TipsyStaticOutput = deprecated_class(TipsyDataset)
 
 # Now individual component imports from the visualization API

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -210,7 +210,7 @@
     def make_light_ray(self, seed=None,
                        start_position=None, end_position=None,
                        trajectory=None,
-                       fields=None,
+                       fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=True,
                        njobs=-1):
@@ -241,6 +241,11 @@
         fields : list
             A list of fields for which to get data.
             Default: None.
+        setup_function : callable, accepts a ds
+            This function will be called on each dataset that is loaded 
+            to create the light ray.  For, example, this can be used to 
+            add new derived fields.
+            Default: None.
         solution_filename : string
             Path to a text file where the trajectories of each
             subray is written out.
@@ -299,6 +304,10 @@
 
             # Load dataset for segment.
             ds = load(my_segment['filename'])
+
+            if setup_function is not None:
+                setup_function(ds)
+            
             my_segment["start"] = ds.domain_width * my_segment["start"] + \
                 ds.domain_left_edge
             my_segment["end"] = ds.domain_width * my_segment["end"] + \

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -17,7 +17,7 @@
 
 from yt.analysis_modules.halo_finding.halo_objects import \
     FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+from yt.frontends.halo_catalog.data_structures import \
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
@@ -70,7 +70,7 @@
     Run the Rockstar halo finding method.
     """
 
-    from yt.frontends.halo_catalogs.rockstar.data_structures import \
+    from yt.frontends.rockstar.data_structures import \
      RockstarDataset
     from yt.analysis_modules.halo_finding.rockstar.api import \
      RockstarHaloFinder

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -21,17 +21,23 @@
     'athena',
     'boxlib',
     'chombo',
+    'eagle',
     'enzo',
     'fits',
     'flash',
+    'gadget',
     'gdf',
-    'halo_catalogs',
+    'halo_catalog',
+    'http_stream',
     'moab',
+    'owls',
+    'owls_subfind',
     #'pluto',
     'ramses',
+    'rockstar',
     'sdf',
-    'sph',
     'stream',
+    'tipsy',
 ]
 
 class _frontend_container:

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/api.py
--- /dev/null
+++ b/yt/frontends/eagle/api.py
@@ -0,0 +1,25 @@
+"""
+API for EAGLE frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    EagleDataset, \
+    EagleNetworkDataset
+
+from .fields import \
+    EagleNetworkFieldInfo
+
+from .io import \
+    IOHandlerEagleNetwork

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/data_structures.py
--- /dev/null
+++ b/yt/frontends/eagle/data_structures.py
@@ -0,0 +1,98 @@
+"""
+Data structures for EAGLE frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import types
+
+from yt.frontends.gadget.data_structures import \
+    GadgetHDF5Dataset
+from yt.frontends.owls.fields import \
+    OWLSFieldInfo
+import yt.units
+
+from .fields import \
+    EagleNetworkFieldInfo
+
+class EagleDataset(GadgetHDF5Dataset):
+    _particle_mass_name = "Mass"
+    _field_info_class = OWLSFieldInfo
+    _time_readin_ = 'Time'
+
+    def _parse_parameter_file(self):
+
+        # read values from header
+        hvals = self._get_hvals()
+        self.parameters = hvals
+
+        # set features common to OWLS and Eagle
+        self._set_owls_eagle()
+
+        # Set time from analytic solution for flat LCDM universe
+        a = hvals['ExpansionFactor']
+        H0 = hvals['H(z)'] / hvals['E(z)']
+        a_eq = ( self.omega_matter / self.omega_lambda )**(1./3)
+        t1 = 2.0 / ( 3.0 * np.sqrt( self.omega_lambda ) )
+        t2 = (a/a_eq)**(3./2)
+        t3 = np.sqrt( 1.0 + (a/a_eq)**3 )
+        t = t1 * np.log( t2 + t3 ) / H0
+        self.current_time = t * yt.units.s
+
+    def _set_code_unit_attributes(self):
+        self._set_owls_eagle_units()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        need_groups = ['Config', 'Constants', 'HashTable', 'Header', 
+                       'Parameters', 'RuntimePars', 'Units']
+        veto_groups = ['SUBFIND',
+                       'PartType0/ChemistryAbundances', 
+                       'PartType0/ChemicalAbundances']
+        valid = True
+        try:
+            fileh = h5py.File(args[0], mode='r')
+            for ng in need_groups:
+                if ng not in fileh["/"]:
+                    valid = False
+            for vg in veto_groups:
+                if vg in fileh["/"]:
+                    valid = False                    
+            fileh.close()
+        except:
+            valid = False
+            pass
+        return valid
+
+class EagleNetworkDataset(EagleDataset):
+    _particle_mass_name = "Mass"
+    _field_info_class = EagleNetworkFieldInfo
+    _time_readin = 'Time'
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0], mode='r')
+            if "Constants" in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys() and \
+               "SUBFIND" not in fileh["/"].keys() and \
+               ("ChemistryAbundances" in fileh["PartType0"].keys()
+                or "ChemicalAbundances" in fileh["PartType0"].keys()):
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False

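For reference, the current_time calculation above is (on my reading) the standard analytic age of a flat LCDM universe, with a_eq = (Omega_m / Omega_Lambda)^(1/3) and H_0 = H(z)/E(z) taken from the header:

    t(a) = \frac{2}{3 H_0 \sqrt{\Omega_\Lambda}}
           \ln\left[ \left(\frac{a}{a_{\rm eq}}\right)^{3/2}
                     + \sqrt{1 + \left(\frac{a}{a_{\rm eq}}\right)^{3}} \right]
         = \frac{2}{3 H_0 \sqrt{\Omega_\Lambda}}
           \,\mathrm{arcsinh}\left[ \sqrt{\frac{\Omega_\Lambda}{\Omega_m}}\, a^{3/2} \right]
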
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/definitions.py
--- /dev/null
+++ b/yt/frontends/eagle/definitions.py
@@ -0,0 +1,35 @@
+"""
+EAGLE definitions
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+eaglenetwork_ions = \
+    ('electron', 'H1', 'H2', 'H_m', 'He1', 'He2','He3', 'C1',\
+     'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C_m', 'N1', 'N2', \
+     'N3', 'N4', 'N5', 'N6', 'N7', 'N8', 'O1', 'O2', 'O3', \
+     'O4', 'O5', 'O6', 'O7', 'O8', 'O9', 'O_m', 'Ne1', 'Ne2',\
+     'Ne3', 'Ne4', 'Ne5', 'Ne6', 'Ne7', 'Ne8', 'Ne9', 'Ne10',\
+     'Ne11', 'Mg1', 'Mg2', 'Mg3', 'Mg4', 'Mg5', 'Mg6', 'Mg7',\
+     'Mg8', 'Mg9', 'Mg10', 'Mg11', 'Mg12', 'Mg13', 'Si1', 'Si2',\
+     'Si3', 'Si4', 'Si5', 'Si6', 'Si7', 'Si8', 'Si9', 'Si10',\
+     'Si11', 'Si12', 'Si13', 'Si14', 'Si15', 'Si16', 'Si17',\
+     'Ca1', 'Ca2', 'Ca3', 'Ca4', 'Ca5', 'Ca6', 'Ca7', 'Ca8',\
+     'Ca9', 'Ca10', 'Ca11', 'Ca12', 'Ca13', 'Ca14', 'Ca15',\
+     'Ca16', 'Ca17', 'Ca18', 'Ca19', 'Ca20', 'Ca21', 'Fe1',\
+     'Fe2', 'Fe3', 'Fe4', 'Fe5', 'Fe6', 'Fe7', 'Fe8', 'Fe9',\
+     'Fe10', 'Fe11', 'Fe12', 'Fe13', 'Fe14', 'Fe15', 'Fe16',\
+     'Fe17', 'Fe18', 'Fe19', 'Fe20', 'Fe21', 'Fe22', 'Fe23',\
+     'Fe24', 'Fe25', 'Fe25', 'Fe27',)
+
+eaglenetwork_ion_lookup = {ion:index for index, ion in enumerate(eaglenetwork_ions)}

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/fields.py
--- /dev/null
+++ b/yt/frontends/eagle/fields.py
@@ -0,0 +1,73 @@
+"""
+EAGLE fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.owls.fields import \
+    OWLSFieldInfo
+from yt.units.yt_array import YTQuantity
+from yt.utilities.periodic_table import periodic_table
+
+from .definitions import \
+    eaglenetwork_ion_lookup
+
+class EagleNetworkFieldInfo(OWLSFieldInfo):
+
+    _ions = \
+        ('H1', 'H2', 'He1', 'He2','He3', 'C1',\
+         'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'N1', 'N2', \
+         'N3', 'N4', 'N5', 'N6', 'N7', 'N8', 'O1', 'O2', 'O3', \
+         'O4', 'O5', 'O6', 'O7', 'O8', 'O9', 'Ne1', 'Ne2',\
+         'Ne3', 'Ne4', 'Ne5', 'Ne6', 'Ne7', 'Ne8', 'Ne9', 'Ne10',\
+         'Ne11', 'Mg1', 'Mg2', 'Mg3', 'Mg4', 'Mg5', 'Mg6', 'Mg7',\
+         'Mg8', 'Mg9', 'Mg10', 'Mg11', 'Mg12', 'Mg13', 'Si1', 'Si2',\
+         'Si3', 'Si4', 'Si5', 'Si6', 'Si7', 'Si8', 'Si9', 'Si10',\
+         'Si11', 'Si12', 'Si13', 'Si14', 'Si15', 'Si16', 'Si17',\
+         'Ca1', 'Ca2', 'Ca3', 'Ca4', 'Ca5', 'Ca6', 'Ca7', 'Ca8',\
+         'Ca9', 'Ca10', 'Ca11', 'Ca12', 'Ca13', 'Ca14', 'Ca15',\
+         'Ca16', 'Ca17', 'Ca18', 'Ca19', 'Ca20', 'Ca21', 'Fe1',\
+         'Fe2', 'Fe3', 'Fe4', 'Fe5', 'Fe6', 'Fe7', 'Fe8', 'Fe9',\
+         'Fe10', 'Fe11', 'Fe12', 'Fe13', 'Fe14', 'Fe15', 'Fe16',\
+         'Fe17', 'Fe18', 'Fe19', 'Fe20', 'Fe21', 'Fe22', 'Fe23',\
+         'Fe24', 'Fe25', 'Fe25', 'Fe27',)
+
+    def __init__(self, *args, **kwargs):
+        
+        super(EagleNetworkFieldInfo,self).__init__( *args, **kwargs )
+        
+    def _create_ion_density_func( self, ftype, ion ):
+        """ returns a function that calculates the ion density of a particle. 
+        """ 
+
+        def _ion_density(field, data):
+
+            # Lookup the index of the ion 
+            index = eaglenetwork_ion_lookup[ion] 
+
+            # Ion to hydrogen number density ratio
+            ion_chem = data[ftype, "Chemistry_%03i"%index]
+
+            # Mass of a single ion
+            if ion[0:2].isalpha():
+                symbol = ion[0:2].capitalize()
+            else:
+                symbol = ion[0:1].capitalize()
+            m_ion = YTQuantity(periodic_table.elements_by_symbol[symbol].weight, 'amu')
+
+            # hydrogen number density 
+            n_H = data["PartType0", "H_number_density"] 
+
+            return m_ion*ion_chem*n_H 
+        
+        return _ion_density

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/io.py
--- /dev/null
+++ b/yt/frontends/eagle/io.py
@@ -0,0 +1,21 @@
+"""
+EAGLE data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.owls.io import \
+    IOHandlerOWLS
+
+class IOHandlerEagleNetwork(IOHandlerOWLS):
+    _dataset_type = "eagle_network"

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/eagle/setup.py
--- /dev/null
+++ b/yt/frontends/eagle/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('eagle', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/gadget/api.py
--- /dev/null
+++ b/yt/frontends/gadget/api.py
@@ -0,0 +1,23 @@
+"""
+API for Gadget frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    GadgetDataset, \
+    GadgetHDF5Dataset
+
+from .io import \
+    IOHandlerGadgetBinary, \
+    IOHandlerGadgetHDF5

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/gadget/data_structures.py
--- /dev/null
+++ b/yt/frontends/gadget/data_structures.py
@@ -0,0 +1,337 @@
+"""
+Data structures for Gadget frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import os
+import types
+
+from yt.data_objects.static_output import \
+    ParticleFile
+from yt.frontends.sph.data_structures import \
+    ParticleDataset
+from yt.frontends.sph.fields import \
+    SPHFieldInfo
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.cosmology import \
+    Cosmology
+from yt.utilities.definitions import \
+    sec_conversion
+from yt.utilities.fortran_utils import read_record
+from yt.utilities.logger import ytLogger as mylog
+
+from .definitions import \
+    gadget_header_specs, \
+    gadget_field_specs, \
+    gadget_ptype_specs
+
+def _fix_unit_ordering(unit):
+    if isinstance(unit[0], types.StringTypes):
+        unit = unit[1], unit[0]
+    return unit
+
+class GadgetBinaryFile(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with open(filename, "rb") as f:
+            self.header = read_record(f, ds._header_spec)
+            self._position_offset = f.tell()
+            f.seek(0, os.SEEK_END)
+            self._file_size = f.tell()
+
+        super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id)
+
+    def _calculate_offsets(self, field_list):
+        self.field_offsets = self.io._calculate_field_offsets(
+            field_list, self.total_particles,
+            self._position_offset, self._file_size)
+
+class GadgetDataset(ParticleDataset):
+    _index_class = ParticleIndex
+    _file_class = GadgetBinaryFile
+    _field_info_class = SPHFieldInfo
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
+    _particle_velocity_name = "Velocities"
+    _suffix = ""
+
+    def __init__(self, filename, dataset_type="gadget_binary",
+                 additional_fields=(),
+                 unit_base=None, n_ref=64,
+                 over_refine_factor=1,
+                 bounding_box = None,
+                 header_spec = "default",
+                 field_spec = "default",
+                 ptype_spec = "default",
+                 units_override=None):
+        if self._instantiated: return
+        self._header_spec = self._setup_binary_spec(
+            header_spec, gadget_header_specs)
+        self._field_spec = self._setup_binary_spec(
+            field_spec, gadget_field_specs)
+        self._ptype_spec = self._setup_binary_spec(
+            ptype_spec, gadget_ptype_specs)
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        self.storage_filename = None
+        if unit_base is not None and "UnitLength_in_cm" in unit_base:
+            # We assume this is comoving, because in the absence of comoving
+            # integration the redshift will be zero.
+            unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"]
+        self._unit_base = unit_base
+        if bounding_box is not None:
+            bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+        else:
+            self.domain_left_edge = self.domain_right_edge = None
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for GadgetDataset. "+
+                               "Use unit_base instead.")
+        super(GadgetDataset, self).__init__(filename, dataset_type)
+
+    def _setup_binary_spec(self, spec, spec_dict):
+        if isinstance(spec, types.StringTypes):
+            _hs = ()
+            for hs in spec.split("+"):
+                _hs += spec_dict[hs]
+            spec = _hs
+        return spec
+
+    def __repr__(self):
+        return os.path.basename(self.parameter_filename).split(".")[0]
+
+    def _get_hvals(self):
+        # The entries in this header are capitalized and named to match Table 4
+        # in the GADGET-2 user guide.
+
+        f = open(self.parameter_filename)
+        hvals = read_record(f, self._header_spec)
+        for i in hvals:
+            if len(hvals[i]) == 1:
+                hvals[i] = hvals[i][0]
+        return hvals
+
+    def _parse_parameter_file(self):
+
+        hvals = self._get_hvals()
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        # Set standard values
+
+        # We may have an overridden bounding box.
+        if self.domain_left_edge is None:
+            self.domain_left_edge = np.zeros(3, "float64")
+            self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.cosmological_simulation = 1
+
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+        # According to the Gadget manual, OmegaLambda will be zero for
+        # non-cosmological datasets.  However, it may be the case that
+        # individuals are running cosmological simulations *without* Lambda, in
+        # which case we may be doing something incorrect here.
+        # It may be possible to deduce whether ComovingIntegration is on
+        # somehow, but opinions on this vary.
+        if self.omega_lambda == 0.0:
+            mylog.info("Omega Lambda is 0.0, so we are turning off Cosmology.")
+            self.hubble_constant = 1.0  # So that scaling comes out correct
+            self.cosmological_simulation = 0
+            self.current_redshift = 0.0
+            # This may not be correct.
+            self.current_time = hvals["Time"] * sec_conversion["Gyr"]
+        else:
+            # Now we calculate our time based on the cosmology, because in
+            # ComovingIntegration hvals["Time"] will in fact be the expansion
+            # factor, not the actual integration time, so we re-calculate
+            # global time from our Cosmology.
+            cosmo = Cosmology(self.hubble_constant,
+                              self.omega_matter, self.omega_lambda)
+            self.current_time = cosmo.hubble_time(self.current_redshift)
+            mylog.info("Calculating time from %0.3e to be %0.3e seconds",
+                       hvals["Time"], self.current_time)
+        self.parameters = hvals
+
+        prefix = self.parameter_filename.split(".", 1)[0]
+
+        if hvals["NumFiles"] > 1:
+            self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
+        else:
+            self.filename_template = self.parameter_filename
+
+        self.file_count = hvals["NumFiles"]
+
+    def _set_code_unit_attributes(self):
+        # If no units passed in by user, set a sane default (Gadget-2 users guide).
+        if self._unit_base is None:
+            if self.cosmological_simulation == 1:
+                mylog.info("Assuming length units are in kpc/h (comoving)")
+                self._unit_base = dict(length = (1.0, "kpccm/h"))
+            else:
+                mylog.info("Assuming length units are in kpc (physical)")
+                self._unit_base = dict(length = (1.0, "kpc"))
+                
+        # If units passed in by user, decide what to do about
+        # co-moving and factors of h
+        unit_base = self._unit_base or {}
+        if "length" in unit_base:
+            length_unit = unit_base["length"]
+        elif "UnitLength_in_cm" in unit_base:
+            if self.cosmological_simulation == 0:
+                length_unit = (unit_base["UnitLength_in_cm"], "cm")
+            else:
+                length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
+        else:
+            raise RuntimeError
+        length_unit = _fix_unit_ordering(length_unit)
+        self.length_unit = self.quan(length_unit[0], length_unit[1])
+
+        unit_base = self._unit_base or {}
+        if "velocity" in unit_base:
+            velocity_unit = unit_base["velocity"]
+        elif "UnitVelocity_in_cm_per_s" in unit_base:
+            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+        else:
+            velocity_unit = (1e5, "cm/s")
+        velocity_unit = _fix_unit_ordering(velocity_unit)
+        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+
+        # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
+        # Default to 1e10 Msun/h if mass is not specified.
+        if "mass" in unit_base:
+            mass_unit = unit_base["mass"]
+        elif "UnitMass_in_g" in unit_base:
+            if self.cosmological_simulation == 0:
+                mass_unit = (unit_base["UnitMass_in_g"], "g")
+            else:
+                mass_unit = (unit_base["UnitMass_in_g"], "g/h")
+        else:
+            # Sane default
+            mass_unit = (1.0, "1e10*Msun/h")
+        mass_unit = _fix_unit_ordering(mass_unit)
+        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        self.time_unit = self.length_unit / self.velocity_unit
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # We do not allow load() of these files.
+        return False
+
+class GadgetHDF5Dataset(GadgetDataset):
+    _file_class = ParticleFile
+    _field_info_class = SPHFieldInfo
+    _particle_mass_name = "Masses"
+    _suffix = ".hdf5"
+
+    def __init__(self, filename, dataset_type="gadget_hdf5", 
+                 unit_base = None, n_ref=64,
+                 over_refine_factor=1,
+                 bounding_box = None,
+                 units_override=None):
+        self.storage_filename = None
+        filename = os.path.abspath(filename)
+        if units_override is not None:
+            raise RuntimeError("units_override is not supported for GadgetHDF5Dataset. "+
+                               "Use unit_base instead.")
+        super(GadgetHDF5Dataset, self).__init__(
+            filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
+            over_refine_factor=over_refine_factor,
+            bounding_box = bounding_box)
+
+    def _get_hvals(self):
+        handle = h5py.File(self.parameter_filename, mode="r")
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+        # Rename for compatibility with the binary Gadget header field names.
+        hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+        hvals["Massarr"] = hvals["MassTable"]
+        handle.close()
+        return hvals
+
+    def _get_uvals(self):
+        handle = h5py.File(self.parameter_filename, mode="r")
+        uvals = {}
+        uvals.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        handle.close()
+        return uvals
+
+    def _set_owls_eagle(self):
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        self._unit_base = self._get_uvals()
+        self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+
+        self.current_redshift = self.parameters["Redshift"]
+        self.omega_lambda = self.parameters["OmegaLambda"]
+        self.omega_matter = self.parameters["Omega0"]
+        self.hubble_constant = self.parameters["HubbleParam"]
+
+        if self.domain_left_edge is None:
+            self.domain_left_edge = np.zeros(3, "float64")
+            self.domain_right_edge = np.ones(3, "float64") * self.parameters["BoxSize"]
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+
+        prefix = os.path.abspath(self.parameter_filename.split(".", 1)[0])
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.file_count = self.parameters["NumFilesPerSnapshot"]
+
+    def _set_owls_eagle_units(self):
+
+        # note the contents of the HDF5 Units group are in _unit_base 
+        # note the velocity stored on disk is sqrt(a) dx/dt 
+        self.length_unit = self.quan(self._unit_base["UnitLength_in_cm"], 'cmcm/h')
+        self.mass_unit = self.quan(self._unit_base["UnitMass_in_g"], 'g/h')
+        self.velocity_unit = self.quan(self._unit_base["UnitVelocity_in_cm_per_s"], 'cm/s')
+        self.time_unit = self.quan(self._unit_base["UnitTime_in_s"], 's/h')
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0], mode='r')
+            if "Constants" not in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False

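For readers following the unit logic in _set_code_unit_attributes above: the length, velocity, and mass units are each resolved with a fixed precedence, namely an explicit yt-style tuple (e.g. "length"), then a raw Gadget parameter (e.g. "UnitLength_in_cm", treated as comoving and h-scaled for cosmological runs), then a hard-coded default. A minimal standalone sketch of the length branch, with made-up numbers and ignoring _fix_unit_ordering:

    # Illustrative sketch only; mirrors the precedence in _set_code_unit_attributes,
    # not the yt implementation itself.
    def resolve_length_unit(unit_base, cosmological):
        if "length" in unit_base:
            return unit_base["length"]              # e.g. (1.0, "kpccm/h")
        if "UnitLength_in_cm" in unit_base:
            suffix = "cmcm/h" if cosmological else "cm"
            return (unit_base["UnitLength_in_cm"], suffix)
        raise RuntimeError("no length unit could be determined")

    print(resolve_length_unit({"UnitLength_in_cm": 3.085678e21}, cosmological=True))
    # (3.085678e+21, 'cmcm/h'), i.e. 1 comoving kpc/h expressed in cm
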
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/gadget/definitions.py
--- /dev/null
+++ b/yt/frontends/gadget/definitions.py
@@ -0,0 +1,69 @@
+"""
+Gadget definitions
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+gadget_header_specs = dict(
+    default      = (('Npart', 6, 'i'),
+                    ('Massarr', 6, 'd'),
+                    ('Time', 1, 'd'),
+                    ('Redshift', 1, 'd'),
+                    ('FlagSfr', 1, 'i'),
+                    ('FlagFeedback', 1, 'i'),
+                    ('Nall', 6, 'i'),
+                    ('FlagCooling', 1, 'i'),
+                    ('NumFiles', 1, 'i'),
+                    ('BoxSize', 1, 'd'),
+                    ('Omega0', 1, 'd'),
+                    ('OmegaLambda', 1, 'd'),
+                    ('HubbleParam', 1, 'd'),
+                    ('FlagAge', 1, 'i'),
+                    ('FlagMetals', 1, 'i'),
+                    ('NallHW', 6, 'i'),
+                    ('unused', 16, 'i')),
+    pad32       = (('empty',  32, 'c'),),
+    pad64       = (('empty',  64, 'c'),),
+    pad128      = (('empty', 128, 'c'),),
+    pad256      = (('empty', 256, 'c'),),
+)
+
+gadget_ptype_specs = dict(
+    default = ( "Gas",
+                "Halo",
+                "Disk",
+                "Bulge",
+                "Stars",
+                "Bndry" )
+)
+
+gadget_field_specs = dict(
+    default = ( "Coordinates",
+                "Velocities",
+                "ParticleIDs",
+                "Mass",
+                ("InternalEnergy", "Gas"),
+                ("Density", "Gas"),
+                ("SmoothingLength", "Gas"),
+    ),
+    agora_unlv = ( "Coordinates",
+                   "Velocities",
+                   "ParticleIDs",
+                   "Mass",
+                   ("InternalEnergy", "Gas"),
+                   ("Density", "Gas"),
+                   ("Electron_Number_Density", "Gas"),
+                   ("HI_NumberDensity", "Gas"),
+                   ("SmoothingLength", "Gas"),
+    )
+)

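The (name, count, type-code) tuples in gadget_header_specs map directly onto struct/numpy type codes, so the default spec describes a 256-byte header payload. A small sketch (not part of yt) that builds the corresponding struct format string and checks the size, assuming little-endian data:

    import struct

    def spec_to_format(spec, endian="<"):
        # each entry is (name, count, type-code); the codes match the struct module
        return endian + "".join("%d%s" % (count, code) for _, count, code in spec)

    default_spec = (("Npart", 6, "i"), ("Massarr", 6, "d"), ("Time", 1, "d"),
                    ("Redshift", 1, "d"), ("FlagSfr", 1, "i"), ("FlagFeedback", 1, "i"),
                    ("Nall", 6, "i"), ("FlagCooling", 1, "i"), ("NumFiles", 1, "i"),
                    ("BoxSize", 1, "d"), ("Omega0", 1, "d"), ("OmegaLambda", 1, "d"),
                    ("HubbleParam", 1, "d"), ("FlagAge", 1, "i"), ("FlagMetals", 1, "i"),
                    ("NallHW", 6, "i"), ("unused", 16, "i"))

    print(struct.calcsize(spec_to_format(default_spec)))   # 256
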
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/gadget/io.py
--- /dev/null
+++ b/yt/frontends/gadget/io.py
@@ -0,0 +1,210 @@
+"""
+Gadget data-file handling functions
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import os
+import types
+
+from yt.funcs import mylog
+
+from yt.frontends.owls.io import \
+    IOHandlerOWLS
+from yt.geometry.oct_container import \
+    _ORDER_MAX
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
+
+class IOHandlerGadgetHDF5(IOHandlerOWLS):
+    _dataset_type = "gadget_hdf5"
+
+ZeroMass = object()
+    
+class IOHandlerGadgetBinary(BaseIOHandler):
+    _dataset_type = "gadget_binary"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
+    # Particle types (Table 3 in GADGET-2 user guide)
+    #
+    # Blocks in the file:
+    #   HEAD
+    #   POS
+    #   VEL
+    #   ID
+    #   MASS    (variable mass only)
+    #   U       (gas only)
+    #   RHO     (gas only)
+    #   HSML    (gas only)
+    #   POT     (only if enabled in makefile)
+    #   ACCE    (only if enabled in makefile)
+    #   ENDT    (only if enabled in makefile)
+    #   TSTP    (only if enabled in makefile)
+
+    _var_mass = None
+
+    def __init__(self, ds, *args, **kwargs):
+        self._fields = ds._field_spec
+        self._ptypes = ds._ptype_spec
+        super(IOHandlerGadgetBinary, self).__init__(ds, *args, **kwargs)
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.ds["Massarr"]):
+                if v == 0:
+                    vm.append(self._ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype in ptf:
+                # This is where we could implement sub-chunking
+                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
+                pos = self._read_field_from_file(f,
+                            tp[ptype], "Coordinates")
+                yield ptype, (pos[:,0], pos[:,1], pos[:,2])
+            f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype, field_list in sorted(ptf.items()):
+                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
+                pos = self._read_field_from_file(f,
+                            tp[ptype], "Coordinates")
+                mask = selector.select_points(
+                    pos[:,0], pos[:,1], pos[:,2], 0.0)
+                del pos
+                if mask is None: continue
+                for field in field_list:
+                    if field == "Mass" and ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        m = self.ds.parameters["Massarr"][
+                            self._ptypes.index(ptype)]
+                        data[:] = m
+                        yield (ptype, field), data
+                        continue
+                    f.seek(poff[ptype, field], os.SEEK_SET)
+                    data = self._read_field_from_file(f, tp[ptype], field)
+                    data = data[mask,...]
+                    yield (ptype, field), data
+            f.close()
+
+    def _read_field_from_file(self, f, count, name):
+        if count == 0: return
+        if name == "ParticleIDs":
+            dt = "uint32"
+        else:
+            dt = "float32"
+        if name in self._vector_fields:
+            count *= 3
+        arr = np.fromfile(f, dtype=dt, count = count)
+        if name in self._vector_fields:
+            arr = arr.reshape((count/3, 3), order="C")
+        return arr.astype("float64")
+
+    def _initialize_index(self, data_file, regions):
+        count = sum(data_file.total_particles.values())
+        DLE = data_file.ds.domain_left_edge
+        DRE = data_file.ds.domain_right_edge
+        dx = (DRE - DLE) / 2**_ORDER_MAX
+        with open(data_file.filename, "rb") as f:
+            # We add an extra 4 bytes to skip the record marker in front of the first record.
+            f.seek(data_file._position_offset + 4)
+            # The first total_particles * 3 values are positions
+            pp = np.fromfile(f, dtype = 'float32', count = count*3)
+            pp.shape = (count, 3)
+        regions.add_data_file(pp, data_file.file_id, data_file.ds.filter_bbox)
+        morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE,
+                                data_file.ds.filter_bbox)
+        return morton
+
+    def _count_particles(self, data_file):
+        npart = dict((self._ptypes[i], v)
+            for i, v in enumerate(data_file.header["Npart"]))
+        return npart
+
+    # The header payload is 256 bytes, bracketed by 4-byte integer record markers.
+    _field_size = 4
+    def _calculate_field_offsets(self, field_list, pcount,
+                                 offset, file_size = None):
+        # field_list is (ftype, fname) but the blocks are ordered
+        # (fname, ftype) in the file.
+        pos = offset
+        fs = self._field_size
+        offsets = {}
+        for field in self._fields:
+            if not isinstance(field, types.StringTypes):
+                field = field[0]
+            if not any( (ptype, field) in field_list
+                        for ptype in self._ptypes):
+                continue
+            pos += 4
+            any_ptypes = False
+            for ptype in self._ptypes:
+                if field == "Mass" and ptype not in self.var_mass:
+                    continue
+                if (ptype, field) not in field_list:
+                    continue
+                offsets[(ptype, field)] = pos
+                any_ptypes = True
+                if field in self._vector_fields:
+                    pos += 3 * pcount[ptype] * fs
+                else:
+                    pos += pcount[ptype] * fs
+            pos += 4
+            if not any_ptypes: pos -= 8
+        if file_size is not None:
+            if file_size != pos:
+                mylog.warning("Your Gadget-2 file may have extra " +
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
+        return offsets
+
+    def _identify_fields(self, domain):
+        # We can just look at the particle counts.
+        field_list = []
+        tp = domain.total_particles
+        for i, ptype in enumerate(self._ptypes):
+            count = tp[ptype]
+            if count == 0: continue
+            m = domain.header["Massarr"][i]
+            for field in self._fields:
+                if isinstance(field, types.TupleType):
+                    field, req = field
+                    if req is ZeroMass:
+                        if m > 0.0 : continue
+                    elif req != ptype:
+                        continue
+                field_list.append((ptype, field))
+        return field_list, {}

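_calculate_field_offsets above walks the Gadget-2 blocks in file order, skipping Mass for fixed-mass particle types and accounting for the 4-byte Fortran record markers around every block. A stripped-down sketch of the same bookkeeping (single precision assumed, no Mass or per-type presence handling, names made up):

    def block_offsets(pcounts, fields, vector_fields=("Coordinates", "Velocities"),
                      header_size=256):
        fs = 4                                    # bytes per float32 / int32
        pos = 4 + header_size + 4                 # skip the framed header record
        offsets = {}
        for field in fields:
            pos += 4                              # opening record marker
            for ptype, n in pcounts.items():
                offsets[ptype, field] = pos
                width = 3 if field in vector_fields else 1
                pos += width * n * fs
            pos += 4                              # closing record marker
        return offsets, pos

    offs, end_of_file = block_offsets({"Gas": 1000},
                                      ["Coordinates", "Velocities", "ParticleIDs"])
    print(offs[("Gas", "Velocities")], end_of_file)
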
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/gadget/setup.py
--- /dev/null
+++ b/yt/frontends/gadget/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('gadget', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/__init__.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for HaloCatalog frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/api.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/api.py
@@ -0,0 +1,24 @@
+"""
+API for HaloCatalog frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+     HaloCatalogDataset
+
+from .io import \
+     IOHandlerHaloCatalogHDF5
+
+from .fields import \
+     HaloCatalogFieldInfo

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/data_structures.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -0,0 +1,97 @@
+"""
+Data structures for HaloCatalog frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    HaloCatalogFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+    
+class HaloCatalogHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(HaloCatalogHDF5File, self).__init__(ds, io, filename, file_id)
+    
+class HaloCatalogDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = HaloCatalogHDF5File
+    _field_info_class = HaloCatalogFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="halocatalog_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
+        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+        self.particle_types = ("halos")
+        self.particle_types_raw = ("halos")
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] == "halo_catalog":
+                return True
+        return False

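Putting the pieces of the new HaloCatalogDataset together: _is_valid looks for an .h5 file whose data_type attribute is "halo_catalog", _parse_parameter_file reads the cosmology and domain attributes, and the IO handler expects a num_halos attribute plus per-field datasets carrying a units attribute. A hedged sketch of a minimal file that should satisfy those checks (all numbers invented; the name "catalog.0.h5" is chosen to match the prefix/suffix naming above):

    import h5py
    import numpy as np

    n_halos = 4
    with h5py.File("catalog.0.h5", "w") as f:
        f.attrs["data_type"] = "halo_catalog"
        f.attrs["num_halos"] = n_halos
        f.attrs["cosmological_simulation"] = 1
        f.attrs["current_time"] = 4.3e17          # seconds, made up
        f.attrs["current_redshift"] = 0.0
        f.attrs["hubble_constant"] = 0.7
        f.attrs["omega_matter"] = 0.3
        f.attrs["omega_lambda"] = 0.7
        f.attrs["domain_left_edge"] = np.zeros(3)
        f.attrs["domain_right_edge"] = np.ones(3) * 3.1e26   # ~100 Mpc in cm
        for ax in "xyz":
            d = f.create_dataset("particle_position_%s" % ax,
                                 data=np.random.uniform(0.0, 3.1e26, n_halos))
            d.attrs["units"] = "cm"
        d = f.create_dataset("particle_mass", data=np.full(n_halos, 2.0e47))
        d.attrs["units"] = "g"
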
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/fields.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/fields.py
@@ -0,0 +1,48 @@
+"""
+HaloCatalog-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.units.yt_array import \
+    YTArray
+
+from yt.utilities.physical_constants import \
+    mh, \
+    mass_sun_cgs
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class HaloCatalogFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("particle_identifier", ("", [], None)),
+        ("particle_position_x", (p_units, [], None)),
+        ("particle_position_y", (p_units, [], None)),
+        ("particle_position_z", (p_units, [], None)),
+        ("particle_velocity_x", (v_units, [], None)),
+        ("particle_velocity_y", (v_units, [], None)),
+        ("particle_velocity_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Virial Mass")),
+        ("virial_radius", (r_units, [], "Virial Radius")),
+)

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/io.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/io.py
@@ -0,0 +1,119 @@
+"""
+HaloCatalog data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerHaloCatalogHDF5(BaseIOHandler):
+    _dataset_type = "halocatalog_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                x = f['particle_position_x'].value.astype("float64")
+                y = f['particle_position_y'].value.astype("float64")
+                z = f['particle_position_z'].value.astype("float64")
+                yield "halos", (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = f['particle_position_x'].value.astype("float64")
+                    y = f['particle_position_y'].value.astype("float64")
+                    z = f['particle_position_z'].value.astype("float64")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        pcount = data_file.header["num_halos"]
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f.keys(): return None
+            pos = np.empty((pcount, 3), dtype="float64")
+            pos = data_file.ds.arr(pos, "code_length")
+            dx = np.finfo(f['particle_position_x'].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+            pos[:,0] = f["particle_position_x"].value
+            pos[:,1] = f["particle_position_y"].value
+            pos[:,2] = f["particle_position_z"].value
+            # These are 32-bit numbers, so we give a little leeway.
+            # Otherwise, for big sets of particles, we often will bump into the
+            # domain edges.  This helps alleviate that.
+            np.clip(pos, self.ds.domain_left_edge + dx,
+                         self.ds.domain_right_edge - dx, pos)
+            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.ds.domain_left_edge,
+                                       self.ds.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'halos': data_file.header['num_halos']}
+
+    def _identify_fields(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            fields = [("halos", field) for field in f]
+            units = dict([(("halos", field), 
+                           f[field].attrs["units"]) for field in f])
+        return fields, units

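The clip in _initialize_index above exists because positions written in single precision can land exactly on (or round just past) the domain edge and overflow the Morton index. A tiny sketch of the same nudge, using plain numpy instead of yt arrays:

    import numpy as np

    left, right = np.zeros(3), np.ones(3)
    pos = np.array([[1.0, 0.5, 0.0]], dtype="float32").astype("float64")  # on the edge
    dx = 2.0 * np.finfo(np.float32).eps
    np.clip(pos, left + dx, right - dx, out=pos)
    assert (pos > left).all() and (pos < right).all()
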
diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalog/setup.py
--- /dev/null
+++ b/yt/frontends/halo_catalog/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('halo_catalog', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/__init__.py
--- a/yt/frontends/halo_catalogs/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-API for halo catalog frontends.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/api.py
--- a/yt/frontends/halo_catalogs/api.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-API for yt.frontends.halo_catalogs
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .halo_catalog.api import \
-     HaloCatalogDataset, \
-     IOHandlerHaloCatalogHDF5, \
-     HaloCatalogFieldInfo
-
-from .rockstar.api import \
-      RockstarDataset, \
-      IOHandlerRockstarBinary, \
-      RockstarFieldInfo
-
-from .owls_subfind.api import \
-     OWLSSubfindDataset, \
-     IOHandlerOWLSSubfindHDF5, \
-     OWLSSubfindFieldInfo

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/halo_catalog/__init__.py
--- a/yt/frontends/halo_catalogs/halo_catalog/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-API for HaloCatalog frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/halo_catalog/api.py
--- a/yt/frontends/halo_catalogs/halo_catalog/api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-API for HaloCatalog frontend
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-     HaloCatalogDataset
-
-from .io import \
-     IOHandlerHaloCatalogHDF5
-
-from .fields import \
-     HaloCatalogFieldInfo

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Data structures for HaloCatalog frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import stat
-import weakref
-import struct
-import glob
-import time
-import os
-
-from .fields import \
-    HaloCatalogFieldInfo
-
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
-from yt.data_objects.static_output import \
-    Dataset, \
-    ParticleFile
-import yt.utilities.fortran_utils as fpu
-from yt.units.yt_array import \
-    YTArray, \
-    YTQuantity
-    
-class HaloCatalogHDF5File(ParticleFile):
-    def __init__(self, ds, io, filename, file_id):
-        with h5py.File(filename, "r") as f:
-            self.header = dict((field, f.attrs[field]) \
-                               for field in f.attrs.keys())
-
-        super(HaloCatalogHDF5File, self).__init__(ds, io, filename, file_id)
-    
-class HaloCatalogDataset(Dataset):
-    _index_class = ParticleIndex
-    _file_class = HaloCatalogHDF5File
-    _field_info_class = HaloCatalogFieldInfo
-    _suffix = ".h5"
-
-    def __init__(self, filename, dataset_type="halocatalog_hdf5",
-                 n_ref = 16, over_refine_factor = 1, units_override=None):
-        self.n_ref = n_ref
-        self.over_refine_factor = over_refine_factor
-        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
-
-    def _parse_parameter_file(self):
-        with h5py.File(self.parameter_filename, "r") as f:
-            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
-        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
-        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
-
-        for attr in ["cosmological_simulation", "current_time", "current_redshift",
-                     "hubble_constant", "omega_matter", "omega_lambda",
-                     "domain_left_edge", "domain_right_edge"]:
-            setattr(self, attr, hvals[attr])
-        self.periodicity = (True, True, True)
-        self.particle_types = ("halos")
-        self.particle_types_raw = ("halos")
-
-        nz = 1 << self.over_refine_factor
-        self.domain_dimensions = np.ones(3, "int32") * nz
-        self.parameters.update(hvals)
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if not args[0].endswith(".h5"): return False
-        with h5py.File(args[0], "r") as f:
-            if "data_type" in f.attrs and \
-              f.attrs["data_type"] == "halo_catalog":
-                return True
-        return False

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/halo_catalog/fields.py
--- a/yt/frontends/halo_catalogs/halo_catalog/fields.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-HaloCatalog-specific fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import mylog
-from yt.fields.field_info_container import \
-    FieldInfoContainer
-from yt.units.yt_array import \
-    YTArray
-
-from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs
-
-m_units = "g"
-p_units = "cm"
-v_units = "cm / s"
-r_units = "cm"
-
-class HaloCatalogFieldInfo(FieldInfoContainer):
-    known_other_fields = (
-    )
-
-    known_particle_fields = (
-        ("particle_identifier", ("", [], None)),
-        ("particle_position_x", (p_units, [], None)),
-        ("particle_position_y", (p_units, [], None)),
-        ("particle_position_z", (p_units, [], None)),
-        ("particle_velocity_x", (v_units, [], None)),
-        ("particle_velocity_y", (v_units, [], None)),
-        ("particle_velocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
-)

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-HaloCatalog data-file handling function
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-
-from yt.utilities.exceptions import *
-from yt.funcs import mylog
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
-
-from yt.geometry.oct_container import _ORDER_MAX
-
-class IOHandlerHaloCatalogHDF5(BaseIOHandler):
-    _dataset_type = "halocatalog_hdf5"
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        raise NotImplementedError
-
-    def _read_particle_coords(self, chunks, ptf):
-        # This will read chunks and yield the results.
-        chunks = list(chunks)
-        data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
-            with h5py.File(data_file.filename, "r") as f:
-                x = f['particle_position_x'].value.astype("float64")
-                y = f['particle_position_y'].value.astype("float64")
-                z = f['particle_position_z'].value.astype("float64")
-                yield "halos", (x, y, z)
-
-    def _read_particle_fields(self, chunks, ptf, selector):
-        # Now we have all the sizes, and we can allocate
-        chunks = list(chunks)
-        data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
-            with h5py.File(data_file.filename, "r") as f:
-                for ptype, field_list in sorted(ptf.items()):
-                    x = f['particle_position_x'].value.astype("float64")
-                    y = f['particle_position_y'].value.astype("float64")
-                    z = f['particle_position_z'].value.astype("float64")
-                    mask = selector.select_points(x, y, z, 0.0)
-                    del x, y, z
-                    if mask is None: continue
-                    for field in field_list:
-                        data = f[field][mask].astype("float64")
-                        yield (ptype, field), data
-
-    def _initialize_index(self, data_file, regions):
-        pcount = data_file.header["num_halos"]
-        morton = np.empty(pcount, dtype='uint64')
-        mylog.debug("Initializing index % 5i (% 7i particles)",
-                    data_file.file_id, pcount)
-        ind = 0
-        with h5py.File(data_file.filename, "r") as f:
-            if not f.keys(): return None
-            pos = np.empty((pcount, 3), dtype="float64")
-            pos = data_file.ds.arr(pos, "code_length")
-            dx = np.finfo(f['particle_position_x'].dtype).eps
-            dx = 2.0*self.ds.quan(dx, "code_length")
-            pos[:,0] = f["particle_position_x"].value
-            pos[:,1] = f["particle_position_y"].value
-            pos[:,2] = f["particle_position_z"].value
-            # These are 32 bit numbers, so we give a little lee-way.
-            # Otherwise, for big sets of particles, we often will bump into the
-            # domain edges.  This helps alleviate that.
-            np.clip(pos, self.ds.domain_left_edge + dx,
-                         self.ds.domain_right_edge - dx, pos)
-            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.ds.domain_left_edge,
-                                       self.ds.domain_right_edge)
-            regions.add_data_file(pos, data_file.file_id)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge)
-        return morton
-
-    def _count_particles(self, data_file):
-        return {'halos': data_file.header['num_halos']}
-
-    def _identify_fields(self, data_file):
-        with h5py.File(data_file.filename, "r") as f:
-            fields = [("halos", field) for field in f]
-            units = dict([(("halos", field), 
-                           f[field].attrs["units"]) for field in f])
-        return fields, units

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/owls_subfind/__init__.py
--- a/yt/frontends/halo_catalogs/owls_subfind/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-API for HaloCatalog frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 282687f8a6425319429593260eff69a5170276c9 -r 49a0dc20e66151998a389f5a5218d7f9abc66510 yt/frontends/halo_catalogs/owls_subfind/api.py
--- a/yt/frontends/halo_catalogs/owls_subfind/api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-API for OWLSSubfind frontend
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-     OWLSSubfindDataset
-
-from .io import \
-     IOHandlerOWLSSubfindHDF5
-
-from .fields import \
-     OWLSSubfindFieldInfo

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/996bfd765d31/
Changeset:   996bfd765d31
Branch:      yt
User:        jzuhone
Date:        2014-11-07 21:53:46+00:00
Summary:     Cleaning this up and reducing memory
Affected #:  1 file

diff -r 49a0dc20e66151998a389f5a5218d7f9abc66510 -r 996bfd765d31378cfe5435ea2d59be67d2d4a290 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -81,24 +81,13 @@
         start_c = comm.rank*num_cells/comm.size
         end_c = (comm.rank+1)*num_cells/comm.size
         
-        kT = (kboltz*data_source["temperature"][start_c:end_c]).in_units("keV").to_ndarray()
-        vol = data_source["cell_volume"][start_c:end_c].in_cgs().to_ndarray()
-        EM = (data_source["density"][start_c:end_c]/mp).to_ndarray()**2
+        kT = data_source["kT"][start_c:end_c].v
+        vol = data_source["cell_volume"][start_c:end_c].in_cgs().v
+        EM = (data_source["density"][start_c:end_c]/mp).v**2
         EM *= 0.5*(1.+self.X_H)*self.X_H*vol
 
         data_source.clear_data()
-    
-        x = data_source["x"][start_c:end_c].copy()
-        y = data_source["y"][start_c:end_c].copy()
-        z = data_source["z"][start_c:end_c].copy()
-        dx = data_source["dx"][start_c:end_c].copy()
 
-        data_source.clear_data()
-        
-        vx = data_source["velocity_x"][start_c:end_c].copy()
-        vy = data_source["velocity_y"][start_c:end_c].copy()
-        vz = data_source["velocity_z"][start_c:end_c].copy()
-    
         if isinstance(self.Zmet, basestring):
             metalZ = data_source[self.Zmet][start_c:end_c].to_ndarray()
         else:
@@ -189,7 +178,20 @@
         photons = {}
 
         src_ctr = parameters["center"]
-        
+
+        x = data_source["x"][start_c:end_c][idxs]
+        y = data_source["y"][start_c:end_c][idxs]
+        z = data_source["z"][start_c:end_c][idxs]
+        dx = data_source["dx"][start_c:end_c][idxs]
+
+        data_source.clear_data()
+
+        vx = data_source["velocity_x"][start_c:end_c][idxs]
+        vy = data_source["velocity_y"][start_c:end_c][idxs]
+        vz = data_source["velocity_z"][start_c:end_c][idxs]
+
+        data_source.clear_data()
+
         photons["x"] = (x[idxs]-src_ctr[0]).in_units("kpc")
         photons["y"] = (y[idxs]-src_ctr[1]).in_units("kpc")
         photons["z"] = (z[idxs]-src_ctr[2]).in_units("kpc")

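Part of the memory saving here comes from swapping .to_ndarray() for .v: assuming yt's YTArray semantics, .v returns a unit-stripped view of the same buffer while .to_ndarray() makes a copy. A quick illustration:

    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.arange(5, dtype="float64"), "keV")
    view = a.v               # shares memory with a
    copy = a.to_ndarray()    # independent copy
    view[0] = -1.0
    print(a[0])              # -1.0 keV: the view writes through
    print(copy[0])           # 0.0: the copy is unaffected
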

https://bitbucket.org/yt_analysis/yt/commits/f975eb820793/
Changeset:   f975eb820793
Branch:      yt
User:        jzuhone
Date:        2014-11-10 01:23:39+00:00
Summary:     Chunking for photon_simulator
Affected #:  1 file

diff -r 996bfd765d31378cfe5435ea2d59be67d2d4a290 -r f975eb820793f183c895a3ce5bd420e1180e1017 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -26,12 +26,11 @@
 from yt.funcs import *
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system
-from yt import units
+     communication_system, parallel_objects
 
-N_TBIN = 10000
-TMIN = 8.08e-2
-TMAX = 50.
+n_kT = 10000
+kT_min = 8.08e-2
+kT_max = 50.
 
 comm = communication_system.communicators[-1]
 
@@ -77,129 +76,125 @@
                 
         vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
 
-        num_cells = data_source["temperature"].shape[0]
-        start_c = comm.rank*num_cells/comm.size
-        end_c = (comm.rank+1)*num_cells/comm.size
-        
-        kT = data_source["kT"][start_c:end_c].v
-        vol = data_source["cell_volume"][start_c:end_c].in_cgs().v
-        EM = (data_source["density"][start_c:end_c]/mp).v**2
-        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        data_source.clear_data()
-
-        if isinstance(self.Zmet, basestring):
-            metalZ = data_source[self.Zmet][start_c:end_c].to_ndarray()
-        else:
-            metalZ = self.Zmet*np.ones(EM.shape)
-        
-        data_source.clear_data()
-
-        idxs = np.argsort(kT)
-        dshape = idxs.shape
-
-        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
-        dkT = kT_bins[1]-kT_bins[0]
-        kT_idxs = np.digitize(kT[idxs], kT_bins)
-        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
-        bcounts = np.bincount(kT_idxs).astype("int")
-        bcounts = bcounts[bcounts > 0]
-        n = int(0)
-        bcell = []
-        ecell = []
-        for bcount in bcounts:
-            bcell.append(n)
-            ecell.append(n+bcount)
-            n += bcount
-        kT_idxs = np.unique(kT_idxs)
-        
         self.spectral_model.prepare()
         energy = self.spectral_model.ebins
-    
-        cell_em = EM[idxs]*vol_scale
-    
-        number_of_photons = np.zeros(dshape, dtype='uint64')
-        energies = []
-    
-        u = np.random.random(cell_em.shape)
+
+        citer = self.data_source.chunks(["kT","cell_volume","density"], "io")
+        num_chunks = len(citer)
+        pbar = get_pbar("Generating Photons", num_chunks)
+
+        ck = 0
+
+        photons = {}
+        photons["x"] = []
+        photons["y"] = []
+        photons["z"] = []
+        photons["vx"] = []
+        photons["vy"] = []
+        photons["vz"] = []
+        photons["dx"] = []
+        photons["Energy"] = []
+        photons["NumberOfPhotons"] = []
+
+        for chunk in parallel_objects(citer):
+
+            kT = chunk["kT"].v
+            vol = chunk["cell_volume"].in_cgs().v
+            EM = (chunk["density"]/mp).v**2
+            EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+
+            if isinstance(self.Zmet, basestring):
+                metalZ = chunk[self.Zmet].v
+            else:
+                metalZ = self.Zmet*chunk["ones"]
+
+            idxs = np.argsort(kT)
+            dshape = idxs.shape
+
+            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            dkT = kT_bins[1]-kT_bins[0]
+            kT_idxs = np.digitize(kT[idxs], kT_bins)
+            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
+            bcounts = np.bincount(kT_idxs).astype("int")
+            bcounts = bcounts[bcounts > 0]
+            n = int(0)
+            bcell = []
+            ecell = []
+            for bcount in bcounts:
+                bcell.append(n)
+                ecell.append(n+bcount)
+                n += bcount
+            kT_idxs = np.unique(kT_idxs)
+
+            cell_em = EM[idxs]*vol_scale
+
+            u = np.random.random(cell_em.shape)
+
+            for i, ikT in enumerate(kT_idxs):
+
+                ibegin = bcell[i]
+                iend = ecell[i]
+                kT = kT_bins[ikT] + 0.5*dkT
         
-        pbar = get_pbar("Generating Photons", dshape[0])
+                em_sum_c = cell_em[ibegin:iend].sum()
+                em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
 
-        for i, ikT in enumerate(kT_idxs):
+                cspec, mspec = self.spectral_model.get_spectrum(kT)
+                cspec *= dist_fac*em_sum_c/vol_scale
+                mspec *= dist_fac*em_sum_m/vol_scale
 
-            ibegin = bcell[i]
-            iend = ecell[i]
-            kT = kT_bins[ikT] + 0.5*dkT
-        
-            em_sum_c = cell_em[ibegin:iend].sum()
-            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+                cumspec_c = np.cumsum(cspec.ndarray_view())
+                counts_c = cumspec_c[:]/cumspec_c[-1]
+                counts_c = np.insert(counts_c, 0, 0.0)
+                tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
 
-            cspec, mspec = self.spectral_model.get_spectrum(kT)
-            cspec *= dist_fac*em_sum_c/vol_scale
-            mspec *= dist_fac*em_sum_m/vol_scale
+                cumspec_m = np.cumsum(mspec.ndarray_view())
+                counts_m = cumspec_m[:]/cumspec_m[-1]
+                counts_m = np.insert(counts_m, 0, 0.0)
+                tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
 
-            cumspec_c = np.cumsum(cspec.ndarray_view())
-            counts_c = cumspec_c[:]/cumspec_c[-1]
-            counts_c = np.insert(counts_c, 0, 0.0)
-            tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
+                for icell in xrange(ibegin, iend):
+            
+                    cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                    cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                    cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                    cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                    cell_n = cell_n_c + cell_n_m
 
-            cumspec_m = np.cumsum(mspec.ndarray_view())
-            counts_m = cumspec_m[:]/cumspec_m[-1]
-            counts_m = np.insert(counts_m, 0, 0.0)
-            tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
+                    if cell_n > 0:
+                        randvec_c = np.random.uniform(size=cell_n_c)
+                        randvec_c.sort()
+                        randvec_m = np.random.uniform(size=cell_n_m)
+                        randvec_m.sort()
+                        cell_e_c = np.interp(randvec_c, counts_c, energy)
+                        cell_e_m = np.interp(randvec_m, counts_m, energy)
+                        photons["x"].append(chunk["x"][icell])
+                        photons["y"].append(chunk["y"][icell])
+                        photons["z"].append(chunk["z"][icell])
+                        photons["vx"].append(chunk["velocity_x"][icell])
+                        photons["vy"].append(chunk["velocity_y"][icell])
+                        photons["vz"].append(chunk["velocity_z"][icell])
+                        photons["NumberOfPhotons"].append(cell_n)
+                        photons["Energy"].append(np.concatenate([cell_e_c,cell_e_m]))
 
-            for icell in xrange(ibegin, iend):
-            
-                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
-            
-                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
-            
-                cell_n = cell_n_c + cell_n_m
-
-                if cell_n > 0:
-                    number_of_photons[icell] = cell_n
-                    randvec_c = np.random.uniform(size=cell_n_c)
-                    randvec_c.sort()
-                    randvec_m = np.random.uniform(size=cell_n_m)
-                    randvec_m.sort()
-                    cell_e_c = np.interp(randvec_c, counts_c, energy)
-                    cell_e_m = np.interp(randvec_m, counts_m, energy)
-                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
-                
-                pbar.update(icell)
+            ck += 1
+            pbar.update(ck)
 
         pbar.finish()
-            
-        active_cells = number_of_photons > 0
-        idxs = idxs[active_cells]
-        
-        photons = {}
 
         src_ctr = parameters["center"]
 
-        x = data_source["x"][start_c:end_c][idxs]
-        y = data_source["y"][start_c:end_c][idxs]
-        z = data_source["z"][start_c:end_c][idxs]
-        dx = data_source["dx"][start_c:end_c][idxs]
-
-        data_source.clear_data()
-
-        vx = data_source["velocity_x"][start_c:end_c][idxs]
-        vy = data_source["velocity_y"][start_c:end_c][idxs]
-        vz = data_source["velocity_z"][start_c:end_c][idxs]
-
-        data_source.clear_data()
-
-        photons["x"] = (x[idxs]-src_ctr[0]).in_units("kpc")
-        photons["y"] = (y[idxs]-src_ctr[1]).in_units("kpc")
-        photons["z"] = (z[idxs]-src_ctr[2]).in_units("kpc")
-        photons["vx"] = vx[idxs].in_units("km/s")
-        photons["vy"] = vy[idxs].in_units("km/s")
-        photons["vz"] = vz[idxs].in_units("km/s")
-        photons["dx"] = dx[idxs].in_units("kpc")
-        photons["NumberOfPhotons"] = number_of_photons[active_cells]
-        photons["Energy"] = np.concatenate(energies)*units.keV
+        photons["x"] = (ds.arr(photons["x"])-src_ctr[0]).in_units("kpc")
+        photons["y"] = (ds.arr(photons["y"])-src_ctr[1]).in_units("kpc")
+        photons["z"] = (ds.arr(photons["z"])-src_ctr[2]).in_units("kpc")
+        photons["vx"] = ds.arr(photons["vx"]).in_units("km/s")
+        photons["vy"] = ds.arr(photons["vy"]).in_units("km/s")
+        photons["vz"] = ds.arr(photons["vz"]).in_units("km/s")
+        photons["dx"] = ds.arr(photons["dx"]).in_units("kpc")
+        photons["Energy"] = ds.arr(np.concatenate(photons["Energy"]), "keV")
     
         return photons

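The chunked loop above sorts cells by temperature and buckets them with np.digitize/np.bincount so that one model spectrum per occupied kT bin serves every cell in that bin. A stripped-down sketch of just that bookkeeping, with made-up temperatures:

    import numpy as np

    kT = np.array([0.3, 5.2, 0.9, 5.1, 22.0])      # keV, made up
    n_kT, kT_min, kT_max = 10000, 8.08e-2, 50.0

    idxs = np.argsort(kT)
    kT_bins = np.linspace(kT_min, max(kT[idxs][-1], kT_max), num=n_kT + 1)
    kT_idxs = np.digitize(kT[idxs], kT_bins)
    kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
    bcounts = np.bincount(kT_idxs)
    bcounts = bcounts[bcounts > 0]                  # cells per occupied bin
    # contiguous [begin, end) ranges into the kT-sorted cell list, one per bin
    ecell = np.cumsum(bcounts)
    bcell = ecell - bcounts
    print(list(zip(bcell, ecell)))
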

https://bitbucket.org/yt_analysis/yt/commits/7ab05cb10f14/
Changeset:   7ab05cb10f14
Branch:      yt
User:        jzuhone
Date:        2014-11-10 01:52:51+00:00
Summary:     Bug fixes
Affected #:  2 files

diff -r f975eb820793f183c895a3ce5bd420e1180e1017 -r 7ab05cb10f14955f9e6c7ef2938f1bdb9a6388b9 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -81,11 +81,9 @@
         self.spectral_model.prepare()
         energy = self.spectral_model.ebins
 
-        citer = self.data_source.chunks(["kT","cell_volume","density"], "io")
-        num_chunks = len(citer)
-        pbar = get_pbar("Generating Photons", num_chunks)
-
-        ck = 0
+        citer = data_source.chunks(["kT","cell_volume","density",
+                                    "x","y","z","dx","velocity_x",
+                                    "velocity_y","velocity_z"], "io")
 
         photons = {}
         photons["x"] = []
@@ -132,6 +130,8 @@
 
             u = np.random.random(cell_em.shape)
 
+            pbar = get_pbar("Generating Photons", n_kT)
+
             for i, ikT in enumerate(kT_idxs):
 
                 ibegin = bcell[i]
@@ -175,16 +175,17 @@
                         photons["x"].append(chunk["x"][icell])
                         photons["y"].append(chunk["y"][icell])
                         photons["z"].append(chunk["z"][icell])
+                        photons["dx"].append(chunk["dx"][icell])
                         photons["vx"].append(chunk["velocity_x"][icell])
                         photons["vy"].append(chunk["velocity_y"][icell])
                         photons["vz"].append(chunk["velocity_z"][icell])
                         photons["NumberOfPhotons"].append(cell_n)
                         photons["Energy"].append(np.concatenate([cell_e_c,cell_e_m]))
 
-            ck += 1
-            pbar.update(ck)
+            
+                pbar.update(i)
 
-        pbar.finish()
+            pbar.finish()
 
         src_ctr = parameters["center"]
 

diff -r f975eb820793f183c895a3ce5bd420e1180e1017 -r 7ab05cb10f14955f9e6c7ef2938f1bdb9a6388b9 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -296,12 +296,12 @@
         dimension = 0
         width = 0.0
         for i, ax in enumerate("xyz"):
-            pos = data_source[ax]
-            delta = data_source["d%s"%(ax)]
-            le = np.min(pos-0.5*delta)
-            re = np.max(pos+0.5*delta)
+            le, re = data_source.quantities.extrema(ax)
+            delta_min, delta_max = data_source.quantities.extrema("d%s"%ax)
+            le -= 0.5*delta_max
+            re += 0.5*delta_max
             width = max(width, re-parameters["center"][i], parameters["center"][i]-le)
-            dimension = max(dimension, int(width/delta.min()))
+            dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
                 
@@ -332,15 +332,15 @@
                 num_photons = sum(sizes_p)        
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
-                x = np.zeros((num_cells))
-                y = np.zeros((num_cells))
-                z = np.zeros((num_cells))
-                vx = np.zeros((num_cells))
-                vy = np.zeros((num_cells))
-                vz = np.zeros((num_cells))
-                dx = np.zeros((num_cells))
-                n_ph = np.zeros((num_cells), dtype="uint64")
-                e = np.zeros((num_photons))
+                x = np.zeros(num_cells)
+                y = np.zeros(num_cells)
+                z = np.zeros(num_cells)
+                vx = np.zeros(num_cells)
+                vy = np.zeros(num_cells)
+                vz = np.zeros(num_cells)
+                dx = np.zeros(num_cells)
+                n_ph = np.zeros(num_cells, dtype="uint64")
+                e = np.zeros(num_photons)
             else:
                 sizes_c = []
                 sizes_p = []
@@ -377,15 +377,15 @@
 
         else:
 
-            x = self.photons["x"].ndarray_view()
-            y = self.photons["y"].ndarray_view()
-            z = self.photons["z"].ndarray_view()
-            vx = self.photons["vx"].ndarray_view()
-            vy = self.photons["vy"].ndarray_view()
-            vz = self.photons["vz"].ndarray_view()
-            dx = self.photons["dx"].ndarray_view()
+            x = self.photons["x"].d
+            y = self.photons["y"].d
+            z = self.photons["z"].d
+            vx = self.photons["vx"].d
+            vy = self.photons["vy"].d
+            vz = self.photons["vz"].d
+            dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
-            e = self.photons["Energy"].ndarray_view()
+            e = self.photons["Energy"].d
                                                 
         if comm.rank == 0:
             
@@ -472,7 +472,7 @@
         else:
             sky_center = YTArray(sky_center, "degree")
 
-        dx = self.photons["dx"].ndarray_view()
+        dx = self.photons["dx"].d
         nx = self.parameters["Dimension"]
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
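
The two API changes in this diff -- .d in place of .ndarray_view(), and derived-quantity extrema instead of manual min/max over positions -- can be sketched with yt's fake_random_ds helper standing in for a real dataset:

    import numpy as np
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    ad = ds.all_data()
    dens = ad["density"]
    # .d is shorthand for ndarray_view(): a plain ndarray view of the same data
    assert np.array_equal(dens.d, dens.ndarray_view())
    # the extrema derived quantity returns the (min, max) of a field
    dmin, dmax = ad.quantities.extrema("density")
    print(dmin, dmax)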


https://bitbucket.org/yt_analysis/yt/commits/97b7e97d7da2/
Changeset:   97b7e97d7da2
Branch:      yt
User:        jzuhone
Date:        2014-11-10 02:20:45+00:00
Summary:     Bug fixes and touch-ups
Affected #:  3 files

diff -r 7ab05cb10f14955f9e6c7ef2938f1bdb9a6388b9 -r 97b7e97d7da220a765b8417b8eb9c9c4248c42b2 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -99,6 +99,8 @@
         for chunk in parallel_objects(citer):
 
             kT = chunk["kT"].v
+            if len(kT) == 0:
+                continue
             vol = chunk["cell_volume"].in_cgs().v
             EM = (chunk["density"]/mp).v**2
             EM *= 0.5*(1.+self.X_H)*self.X_H*vol

diff -r 7ab05cb10f14955f9e6c7ef2938f1bdb9a6388b9 -r 97b7e97d7da220a765b8417b8eb9c9c4248c42b2 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -28,8 +28,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
-from yt import units
-from yt.units.yt_array import YTQuantity
+from yt.units.yt_array import YTQuantity, YTArray
 import h5py
 from yt.utilities.on_demand_imports import _astropy
 pyfits = _astropy.pyfits
@@ -93,12 +92,12 @@
         
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = f["/fid_exp_time"].value*units.s
-        parameters["FiducialArea"] = f["/fid_area"].value*units.cm*units.cm
+        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
         parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = f["/fid_d_a"].value*units.Mpc
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
         parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = f["/width"].value*units.kpc
+        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
         parameters["HubbleConstant"] = f["/hubble"].value
         parameters["OmegaMatter"] = f["/omega_matter"].value
         parameters["OmegaLambda"] = f["/omega_lambda"].value
@@ -107,13 +106,13 @@
         start_c = comm.rank*num_cells/comm.size
         end_c = (comm.rank+1)*num_cells/comm.size
         
-        photons["x"] = f["/x"][start_c:end_c]*units.kpc
-        photons["y"] = f["/y"][start_c:end_c]*units.kpc
-        photons["z"] = f["/z"][start_c:end_c]*units.kpc
-        photons["dx"] = f["/dx"][start_c:end_c]*units.kpc
-        photons["vx"] = f["/vx"][start_c:end_c]*units.km/units.s
-        photons["vy"] = f["/vy"][start_c:end_c]*units.km/units.s
-        photons["vz"] = f["/vz"][start_c:end_c]*units.km/units.s
+        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
 
         n_ph = f["/num_photons"][:]
         
@@ -128,7 +127,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
         
-        photons["Energy"] = f["/energy"][start_e:end_e]*units.keV
+        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
         
         f.close()
 
@@ -611,7 +610,7 @@
         events = {}
 
         dx_min = self.parameters["Width"].value/self.parameters["Dimension"]
-        dtheta = np.rad2deg(dx_min/D_A.value)*units.degree
+        dtheta = YTQuantity(np.rad2deg(dx_min/D_A.value), "degree")
         
         events["xpix"] = xsky[detected]/dx_min + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min + 0.5*(nx+1)
@@ -749,8 +748,8 @@
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2                                                
         x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
-        self.events["xsky"] = x*units.degree
-        self.events["ysky"] = y*units.degree
+        self.events["xsky"] = YTArray(x, "degree")
+        self.events["ysky"] = YTArray(y, "degree")
 
     def keys(self):
         return self.events.keys()
@@ -780,10 +779,10 @@
         
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = f["/exp_time"].value*units.s
-        parameters["Area"] = f["/area"].value*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = f["/d_a"].value*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
             parameters["RMF"] = f["/rmf"].value
         if "arf" in f:
@@ -799,13 +798,13 @@
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
-        events["eobs"] = f["/eobs"][:]*units.keV
+        events["eobs"] = YTArray(f["/eobs"][:], "keV")
         if "pi" in f:
             events["PI"] = f["/pi"][:]
         if "pha" in f:
             events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = f["/sky_center"][:]*units.deg
-        parameters["dtheta"] = f["/dtheta"].value*units.deg
+        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
         parameters["pix_center"] = f["/pix_center"][:]
         
         f.close()
@@ -824,10 +823,10 @@
         events = {}
         parameters = {}
         
-        parameters["ExposureTime"] = tblhdu.header["EXPOSURE"]*units.s
-        parameters["Area"] = tblhdu.header["AREA"]*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
+        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
-        parameters["AngularDiameterDistance"] = tblhdu.header["D_A"]*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:
             parameters["RMF"] = tblhdu["RMF"]
         if "ARF" in tblhdu.header:
@@ -840,12 +839,12 @@
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
             parameters["Instrument"] = tblhdu["INSTRUME"]
-        parameters["sky_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])*units.deg
+        parameters["sky_center"] = YTArray([tblhdu["TCRVL2"],tblhdu["TCRVL3"]], "deg")
         parameters["pix_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])
-        parameters["dtheta"] = tblhdu["TCRVL3"]*units.deg
+        parameters["dtheta"] = YTQuantity(tblhdu["TCRVL3"], "deg")
         events["xpix"] = tblhdu.data.field("X")
         events["ypix"] = tblhdu.data.field("Y")
-        events["eobs"] = (tblhdu.data.field("ENERGY")/1000.)*units.keV # Convert to keV
+        events["eobs"] = YTArray(tblhdu.data.field("ENERGY")/1000., "keV")
         if "PI" in tblhdu.columns.names:
             events["PI"] = tblhdu.data.field("PI")
         if "PHA" in tblhdu.columns.names:

diff -r 7ab05cb10f14955f9e6c7ef2938f1bdb9a6388b9 -r 97b7e97d7da220a765b8417b8eb9c9c4248c42b2 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -14,7 +14,7 @@
 import numpy as np
 import os
 from yt.funcs import *
-from yt import units
+from yt.units.yt_array import YTQuantity
 import h5py
 
 try:
@@ -30,15 +30,14 @@
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
 hc = (hcgs*clight).in_units("keV*angstrom")
-cm3 = units.cm*units.cm*units.cm
 
 class SpectralModel(object):
 
     def __init__(self, emin, emax, nchan):
-        self.emin = emin*units.keV
-        self.emax = emax*units.keV
+        self.emin = YTQuantity(emin, "keV")
+        self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
+        self.ebins = np.linspace(emin, emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -100,7 +99,7 @@
             metal_spec = np.zeros((self.nchan))
         else:
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
-        return cosmic_spec*cm3/units.s, metal_spec*cm3/units.s
+        return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -275,7 +274,7 @@
         mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
-            return cspec_l*cm3/units.s, mspec_l*cm3/units.s
+            return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s")
         dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
@@ -285,8 +284,8 @@
         for elem in self.metal_elem:
             mspec_l += self._make_spectrum(elem, tindex+2)
             mspec_r += self._make_spectrum(elem, tindex+3)
-        cosmic_spec = (cspec_l*(1.-dT)+cspec_r*dT)*cm3/units.s
-        metal_spec = (mspec_l*(1.-dT)+mspec_r*dT)*cm3/units.s
+        cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
+        metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
         return cosmic_spec, metal_spec
 
 class TableAbsorbModel(SpectralModel):
@@ -313,11 +312,11 @@
         f = h5py.File(self.filename,"r")
         emin = f["energy"][:].min()
         emax = f["energy"][:].max()
-        self.sigma = f["cross_section"][:]*units.cm*units.cm
+        self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.nH = nH*1.0e22/(units.cm*units.cm)
+        self.nH = YTQuantity(nH*1.0e22, "cm**-2")
         
     def prepare(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/bb05d7afeb20/
Changeset:   bb05d7afeb20
Branch:      yt
User:        jzuhone
Date:        2014-11-10 06:09:24+00:00
Summary:     Virtual grids for Athena data
Affected #:  2 files

diff -r 9153a5a32dce19164cc66f8073a26d460aa74cbe -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -31,6 +31,8 @@
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
+from yt.utilities.decompose import \
+    decompose_array
 
 def _get_convert(fname):
     def _conv(data):
@@ -39,7 +41,7 @@
 
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index, level, start, dimensions):
+    def __init__(self, id, index, level, start, dimensions, file_offset):
         df = index.dataset.filename[4:-4]
         gname = index.grid_filenames[id]
         AMRGridPatch.__init__(self, id, filename = gname,
@@ -51,6 +53,7 @@
         self.start_index = start.copy()
         self.stop_index = self.start_index + dimensions
         self.ActiveDimensions = dimensions.copy()
+        self.file_offset = file_offset
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -172,7 +175,7 @@
         self._field_map = field_map
 
     def _count_grids(self):
-        self.num_grids = self.dataset.nvtk
+        self.num_grids = self.dataset.nvtk*self.dataset.nprocs
 
     def _parse_index(self):
         f = open(self.index_filename,'rb')
@@ -220,7 +223,6 @@
         gridlistread = [fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots]
         self.num_grids = len(gridlistread)
         dxs=[]
-        self.grids = np.empty(self.num_grids, dtype='object')
         levels = np.zeros(self.num_grids, dtype='int32')
         glis = np.empty((self.num_grids,3), dtype='float64')
         gdds = np.empty((self.num_grids,3), dtype='float64')
@@ -292,24 +294,65 @@
             self.dataset.domain_dimensions[2] = np.int(1)
         if self.dataset.dimensionality == 1 :
             self.dataset.domain_dimensions[1] = np.int(1)
-        for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i,self,levels[i],
-                                      glis[i],
-                                      gdims[i])
-            dx = (self.dataset.domain_right_edge-
-                  self.dataset.domain_left_edge)/self.dataset.domain_dimensions
-            dx = dx/self.dataset.refine_by**(levels[i])
-            dxs.append(dx)
 
-        dx = self.ds.arr(dxs, "code_length")
         dle = self.dataset.domain_left_edge
         dre = self.dataset.domain_right_edge
-        self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12), "code_length")
-        self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
-                                                    dx*self.grid_dimensions,
-                                                    decimals=12),
-                                            "code_length")
+        dx_root = (self.dataset.domain_right_edge-
+                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
+
+        if self.dataset.nprocs > 1:
+            float_size = np.dtype(">f4").itemsize
+            gle_all = []
+            gre_all = []
+            shapes_all = []
+            levels_all = []
+            new_gridfilenames = []
+            file_offsets = []
+            for i in range(levels.shape[0]):
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                gle_orig = self.ds.arr(np.round(dle + dx*glis[i], decimals=12),
+                                       "code_length")
+                gre_orig = self.ds.arr(np.round(gle_orig + dx*gdims[i], decimals=12),
+                                       "code_length")
+                bbox = np.array([[le,re] for le, re in zip(gle_orig, gre_orig)])
+                psize = np.array([1,1,self.ds.nprocs])
+                gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
+                gle_all += gle
+                gre_all += gre
+                shapes_all += shapes
+                levels_all += [levels[i]]*self.dataset.nprocs
+                new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
+                file_offsets += [(slc[0].start + slc[1].start*shp[0]+slc[2].start*shp[0]*shp[1])*float_size
+                                 for slc, shp in zip(slices, shapes)]
+            self.num_grids *= self.dataset.nprocs
+            self.grids = np.empty(self.num_grids, dtype='object')
+            self.grid_filenames = new_gridfilenames
+            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
+            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
+            self.grid_dimensions = np.array([shape for shape in shapes_all],
+                                            dtype="int32")
+            gdds = (self.grid_right_edge-self.grid_left_edge)/self.grid_dimensions
+            glis = np.round((self.grid_left_edge - self.ds.domain_left_edge)/gdds).astype('int')
+            for i in range(self.num_grids):
+                self.grids[i] = self.grid(i,self,levels_all[i],
+                                          glis[i], shapes_all[i],
+                                          file_offsets[i])
+        else:
+            self.grids = np.empty(self.num_grids, dtype='object')
+            for i in range(levels.shape[0]):
+                self.grids[i] = self.grid(i,self,levels[i],
+                                          glis[i], gdims[i], 0)
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                dxs.append(dx)
+
+            dx = self.ds.arr(dxs, "code_length")
+            self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12),
+                                              "code_length")
+            self.grid_dimensions = gdims.astype("int32")
+            self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
+                                                        dx*self.grid_dimensions,
+                                                        decimals=12),
+                                               "code_length")
         
         if self.dataset.dimensionality <= 2:
             self.grid_right_edge[:,2] = dre[2]
@@ -354,8 +397,9 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None):
+                 units_override=None, nprocs=1):
         self.fluid_types += ("athena",)
+        self.nprocs = nprocs
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters

diff -r 9153a5a32dce19164cc66f8073a26d460aa74cbe -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -35,7 +35,6 @@
         data = {}
         grids_by_file = defaultdict(list)
         if len(chunk.objs) == 0: return data
-        field_list = set(f[1] for f in fields)
         for grid in chunk.objs:
             if grid.filename is None:
                 continue
@@ -45,13 +44,13 @@
             grid_dims = grid.ActiveDimensions
             grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
             read_table_offset = get_read_table_offset(f)
-            for field in self.ds.field_list:
+            for field in fields:
                 dtype, offsetr = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                f.seek(read_table_offset+offset)
+                f.seek(read_table_offset+offset+grid.file_offset)
                 if dtype == 'scalar':
                     v = np.fromfile(f, dtype='>f4',
                                     count=grid_ncells).reshape(grid_dims,order='F')
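
The virtual-grid machinery above leans on decompose_array; a minimal sketch of the call pattern used in the diff (the 64^3 shape and unit bounding box are made up for illustration):

    import numpy as np
    from yt.utilities.decompose import decompose_array

    shape = np.array([64, 64, 64])
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    psize = np.array([1, 1, 8])   # split along z only, as the frontend does
    gle, gre, shapes, slices = decompose_array(shape, psize, bbox)
    print(len(shapes))            # 8 virtual grids, each 64 x 64 x 8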


https://bitbucket.org/yt_analysis/yt/commits/177b386b405a/
Changeset:   177b386b405a
Branch:      yt
User:        jzuhone
Date:        2014-11-10 16:43:18+00:00
Summary:     Bug fixes and docs for virtual grids
Affected #:  4 files

diff -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 -r 177b386b405a3a2c7f7ff417da560f5837414bb6 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -131,6 +131,23 @@
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
+the ``join_vtk`` script), and may benefit from being subdivided into "virtual grids". For this purpose,
+one can pass in the ``nprocs`` parameter:
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("sloshing.0000.vtk", nprocs=8)
+
+which will subdivide each original grid into ``nprocs`` grids along the z-axis.
+
+.. note::
+
+    Virtual grids are only supported for 3D data, and each original grid's dimension along the z-axis must
+    be an integer multiple of ``nprocs``.
+
 Alternative values for the following simulation parameters may be specified using a ``parameters``
 dict, accepting the following keys:
 

diff -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 -r 177b386b405a3a2c7f7ff417da560f5837414bb6 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -309,6 +309,9 @@
             new_gridfilenames = []
             file_offsets = []
             for i in range(levels.shape[0]):
+                if gdims[i][2] % self.ds.nprocs != 0:
+                    raise RuntimeError("Grid %04d cannot be split into virtual grids " % i +
+                                       "since it is not an integer multiple of nprocs!")
                 dx = dx_root/self.dataset.refine_by**(levels[i])
                 gle_orig = self.ds.arr(np.round(dle + dx*glis[i], decimals=12),
                                        "code_length")
@@ -322,7 +325,7 @@
                 shapes_all += shapes
                 levels_all += [levels[i]]*self.dataset.nprocs
                 new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
-                file_offsets += [(slc[0].start + slc[1].start*shp[0]+slc[2].start*shp[0]*shp[1])*float_size
+                file_offsets += [[slc[0], slc[1], slc[2].start*shp[0]*shp[1]*float_size]
                                  for slc, shp in zip(slices, shapes)]
             self.num_grids *= self.dataset.nprocs
             self.grids = np.empty(self.num_grids, dtype='object')
@@ -341,7 +344,7 @@
             self.grids = np.empty(self.num_grids, dtype='object')
             for i in range(levels.shape[0]):
                 self.grids[i] = self.grid(i,self,levels[i],
-                                          glis[i], gdims[i], 0)
+                                          glis[i], gdims[i], [0]*3)
                 dx = dx_root/self.dataset.refine_by**(levels[i])
                 dxs.append(dx)
 
@@ -479,6 +482,8 @@
             dimensionality = 1
         if dimensionality <= 2 : self.domain_dimensions[2] = np.int32(1)
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
+        if dimensionality != 3 and self.nprocs > 1:
+            raise RuntimeError("Virtual grids are only supported for 3D outputs!")
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self.parameter_filename.__hash__()

diff -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 -r 177b386b405a3a2c7f7ff417da560f5837414bb6 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -50,7 +50,7 @@
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                f.seek(read_table_offset+offset+grid.file_offset)
+                f.seek(read_table_offset+offset+grid.file_offset[2])
                 if dtype == 'scalar':
                     v = np.fromfile(f, dtype='>f4',
                                     count=grid_ncells).reshape(grid_dims,order='F')

diff -r bb05d7afeb204d585eb086980015c3e0cf6b9df4 -r 177b386b405a3a2c7f7ff417da560f5837414bb6 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -20,6 +20,8 @@
     big_patch_amr, \
     data_dir_load
 from yt.frontends.athena.api import AthenaDataset
+from yt.config import ytcfg
+from yt.convenience import load
 
 _fields_cloud = ("scalar[0]", "density", "total_energy")
 
@@ -58,6 +60,27 @@
         test_stripping.__name__ = test.description
         yield test
 
+sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
+
+uo_sloshing = {"length_unit":(1.0,"Mpc"),
+               "time_unit":(1.0,"Myr"),
+               "mass_unit":(1.0e14,"Msun")}
+
+@requires_file(sloshing)
+def test_nprocs():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds1 = load(sloshing, units_override=uo_sloshing)
+    sp1 = ds1.sphere("c", (100.,"kpc"))
+    prj1 = ds1.proj("density",0)
+    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
+    sp2 = ds2.sphere("c", (100.,"kpc"))
+    prj2 = ds1.proj("density",0)
+
+    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_equal, prj1["density"], prj2["density"]
+
+    ytcfg["yt","skip_dataset_cache"] = "False"
 
 @requires_file(cloud)
 def test_AthenaDataset():
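
Putting the documentation and the test above together, a minimal usage sketch (the dataset path and unit values are the ones from the test, not files shipped with yt):

    import yt

    units_override = {"length_unit": (1.0, "Mpc"),
                      "time_unit": (1.0, "Myr"),
                      "mass_unit": (1.0e14, "Msun")}

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 units_override=units_override, nprocs=8)
    # each original grid is subdivided into 8 virtual grids along z
    print(ds.index.num_grids)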


https://bitbucket.org/yt_analysis/yt/commits/31e4c2c515df/
Changeset:   31e4c2c515df
Branch:      yt
User:        jzuhone
Date:        2014-11-10 17:45:17+00:00
Summary:     More refinements to speed things up and reduce memory usage
Affected #:  1 file

diff -r 97b7e97d7da220a765b8417b8eb9c9c4248c42b2 -r 31e4c2c515df150456dd13de34cce935d870aa90 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -73,7 +73,8 @@
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
         dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
-                
+        src_ctr = parameters["center"]
+
         vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
 
         my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
@@ -99,7 +100,8 @@
         for chunk in parallel_objects(citer):
 
             kT = chunk["kT"].v
-            if len(kT) == 0:
+            num_cells = len(kT)
+            if num_cells == 0:
                 continue
             vol = chunk["cell_volume"].in_cgs().v
             EM = (chunk["density"]/mp).v**2
@@ -111,7 +113,6 @@
                 metalZ = self.Zmet*chunk["ones"]
 
             idxs = np.argsort(kT)
-            dshape = idxs.shape
 
             kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
             dkT = kT_bins[1]-kT_bins[0]
@@ -132,6 +133,9 @@
 
             u = np.random.random(cell_em.shape)
 
+            number_of_photons = np.zeros(num_cells)
+            energies = []
+
             pbar = get_pbar("Generating Photons", n_kT)
 
             for i, ikT in enumerate(kT_idxs):
@@ -158,7 +162,7 @@
                 tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
 
                 for icell in xrange(ibegin, iend):
-            
+
                     cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
                     cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
             
@@ -168,36 +172,33 @@
                     cell_n = cell_n_c + cell_n_m
 
                     if cell_n > 0:
+                        number_of_photons[icell] = cell_n
                         randvec_c = np.random.uniform(size=cell_n_c)
                         randvec_c.sort()
                         randvec_m = np.random.uniform(size=cell_n_m)
                         randvec_m.sort()
                         cell_e_c = np.interp(randvec_c, counts_c, energy)
                         cell_e_m = np.interp(randvec_m, counts_m, energy)
-                        photons["x"].append(chunk["x"][icell])
-                        photons["y"].append(chunk["y"][icell])
-                        photons["z"].append(chunk["z"][icell])
-                        photons["dx"].append(chunk["dx"][icell])
-                        photons["vx"].append(chunk["velocity_x"][icell])
-                        photons["vy"].append(chunk["velocity_y"][icell])
-                        photons["vz"].append(chunk["velocity_z"][icell])
-                        photons["NumberOfPhotons"].append(cell_n)
-                        photons["Energy"].append(np.concatenate([cell_e_c,cell_e_m]))
-
+                        energies.append(np.concatenate([cell_e_c,cell_e_m]))
             
                 pbar.update(i)
 
             pbar.finish()
 
-        src_ctr = parameters["center"]
+            active_cells = number_of_photons > 0
+            idxs = idxs[active_cells]
 
-        photons["x"] = (ds.arr(photons["x"])-src_ctr[0]).in_units("kpc")
-        photons["y"] = (ds.arr(photons["y"])-src_ctr[1]).in_units("kpc")
-        photons["z"] = (ds.arr(photons["z"])-src_ctr[2]).in_units("kpc")
-        photons["vx"] = ds.arr(photons["vx"]).in_units("km/s")
-        photons["vy"] = ds.arr(photons["vy"]).in_units("km/s")
-        photons["vz"] = ds.arr(photons["vz"]).in_units("km/s")
-        photons["dx"] = ds.arr(photons["dx"]).in_units("kpc")
-        photons["Energy"] = ds.arr(np.concatenate(photons["Energy"]), "keV")
-    
+            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
+            photons["Energy"].append(np.concatenate(energies))
+            photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
+            photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
+            photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
+            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
+            photons["vx"].append(chunk["velocity_y"][idxs].in_units("km/s"))
+            photons["vx"].append(chunk["velocity_z"][idxs].in_units("km/s"))
+            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
+
+        for key in photons:
+            photons[key] = np.concatenate(photons[key])
+
         return photons


https://bitbucket.org/yt_analysis/yt/commits/8bba25eedce3/
Changeset:   8bba25eedce3
Branch:      yt
User:        jzuhone
Date:        2014-11-10 19:36:43+00:00
Summary:     More photon simulator memory reduction and speedup. Fixed a nasty memory leak in the Athena frontend.
Affected #:  2 files

diff -r 31e4c2c515df150456dd13de34cce935d870aa90 -r 8bba25eedce35dc5725c7dc1fd21c88bbf97e1cc yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,6 +27,7 @@
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_objects
+from IPython import embed
 
 n_kT = 10000
 kT_min = 8.08e-2
@@ -97,6 +98,8 @@
         photons["Energy"] = []
         photons["NumberOfPhotons"] = []
 
+        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+
         for chunk in parallel_objects(citer):
 
             kT = chunk["kT"].v
@@ -110,7 +113,7 @@
             if isinstance(self.Zmet, basestring):
                 metalZ = chunk[self.Zmet].v
             else:
-                metalZ = self.Zmet*chunk["ones"]
+                metalZ = self.Zmet
 
             idxs = np.argsort(kT)
 
@@ -136,52 +139,56 @@
             number_of_photons = np.zeros(num_cells)
             energies = []
 
-            pbar = get_pbar("Generating Photons", n_kT)
+            pbar = get_pbar("Generating photons for chunk ", num_cells)
 
-            for i, ikT in enumerate(kT_idxs):
+            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
-                ibegin = bcell[i]
-                iend = ecell[i]
                 kT = kT_bins[ikT] + 0.5*dkT
         
                 em_sum_c = cell_em[ibegin:iend].sum()
-                em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+                if isinstance(self.Zmet, basestring):
+                    em_sum_m = (metalZ*cell_em)[ibegin:iend].sum()
+                else:
+                    em_sum_m = metalZ*em_sum_c
 
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
-                cspec *= dist_fac*em_sum_c/vol_scale
-                mspec *= dist_fac*em_sum_m/vol_scale
 
-                cumspec_c = np.cumsum(cspec.ndarray_view())
+                cumspec_c = np.cumsum(cspec.d)
                 counts_c = cumspec_c[:]/cumspec_c[-1]
                 counts_c = np.insert(counts_c, 0, 0.0)
-                tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
+                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
 
-                cumspec_m = np.cumsum(mspec.ndarray_view())
+                cumspec_m = np.cumsum(mspec.d)
                 counts_m = cumspec_m[:]/cumspec_m[-1]
                 counts_m = np.insert(counts_m, 0, 0.0)
-                tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
+                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
 
-                for icell in xrange(ibegin, iend):
+                v = u[ibegin:iend]
 
-                    cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
-                    cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+                cell_norm_c = tot_ph_c*cell_em[ibegin:iend]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= v)
             
-                    cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
-                    cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+                if isinstance(self.Zmet, basestring):
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cell_em[ibegin:iend]/em_sum_m
+                else:
+                    cell_norm_m = tot_ph_m*metalZ*cell_em[ibegin:iend]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= v)
             
-                    cell_n = cell_n_c + cell_n_m
+                cell_n = cell_n_c + cell_n_m
 
-                    if cell_n > 0:
-                        number_of_photons[icell] = cell_n
-                        randvec_c = np.random.uniform(size=cell_n_c)
+                number_of_photons[ibegin:iend] = cell_n
+
+                for cn, cn_c, cn_m in zip(cell_n, cell_n_c, cell_n_m):
+                    if cn > 0:
+                        randvec_c = np.random.uniform(size=cn_c)
                         randvec_c.sort()
-                        randvec_m = np.random.uniform(size=cell_n_m)
+                        randvec_m = np.random.uniform(size=cn_m)
                         randvec_m.sort()
                         cell_e_c = np.interp(randvec_c, counts_c, energy)
                         cell_e_m = np.interp(randvec_m, counts_m, energy)
                         energies.append(np.concatenate([cell_e_c,cell_e_m]))
             
-                pbar.update(i)
+                pbar.update(iend)
 
             pbar.finish()
 
@@ -189,13 +196,13 @@
             idxs = idxs[active_cells]
 
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
-            photons["Energy"].append(np.concatenate(energies))
+            photons["Energy"].append(ds.arr(np.concatenate(energies), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
             photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
             photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
             photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
-            photons["vx"].append(chunk["velocity_y"][idxs].in_units("km/s"))
-            photons["vx"].append(chunk["velocity_z"][idxs].in_units("km/s"))
+            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
+            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
         for key in photons:
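
The vectorized photon-count step above amounts to stochastic rounding of the expected (fractional) counts per cell; a self-contained sketch of the same idea, written with astype for clarity (equivalent to the np.uint64(...) casts in the diff):

    import numpy as np

    expected = np.array([2.3, 0.7, 5.0])   # expected photons per cell
    u = np.random.random(expected.size)    # one uniform deviate per cell
    # keep the integer part, then add 1 with probability equal to the fractional part
    counts = expected.astype(np.uint64) + (np.modf(expected)[0] >= u).astype(np.uint64)
    print(counts)   # averages to `expected` over many draws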

diff -r 31e4c2c515df150456dd13de34cce935d870aa90 -r 8bba25eedce35dc5725c7dc1fd21c88bbf97e1cc yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -35,7 +35,6 @@
         data = {}
         grids_by_file = defaultdict(list)
         if len(chunk.objs) == 0: return data
-        field_list = set(f[1] for f in fields)
         for grid in chunk.objs:
             if grid.filename is None:
                 continue
@@ -45,7 +44,7 @@
             grid_dims = grid.ActiveDimensions
             grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
             read_table_offset = get_read_table_offset(f)
-            for field in self.ds.field_list:
+            for field in fields:
                 dtype, offsetr = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))


https://bitbucket.org/yt_analysis/yt/commits/a4a09439c520/
Changeset:   a4a09439c520
Branch:      yt
User:        jzuhone
Date:        2014-11-10 19:39:02+00:00
Summary:     Merge
Affected #:  5 files

diff -r 177b386b405a3a2c7f7ff417da560f5837414bb6 -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -26,12 +26,12 @@
 from yt.funcs import *
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system
-from yt import units
+     communication_system, parallel_objects
+from IPython import embed
 
-N_TBIN = 10000
-TMIN = 8.08e-2
-TMAX = 50.
+n_kT = 10000
+kT_min = 8.08e-2
+kT_max = 50.
 
 comm = communication_system.communicators[-1]
 
@@ -74,130 +74,138 @@
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
         dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
-                
+        src_ctr = parameters["center"]
+
         vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
 
-        num_cells = data_source["temperature"].shape[0]
-        start_c = comm.rank*num_cells/comm.size
-        end_c = (comm.rank+1)*num_cells/comm.size
-        
-        kT = (kboltz*data_source["temperature"][start_c:end_c]).in_units("keV").to_ndarray()
-        vol = data_source["cell_volume"][start_c:end_c].in_cgs().to_ndarray()
-        EM = (data_source["density"][start_c:end_c]/mp).to_ndarray()**2
-        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        data_source.clear_data()
-    
-        x = data_source["x"][start_c:end_c].copy()
-        y = data_source["y"][start_c:end_c].copy()
-        z = data_source["z"][start_c:end_c].copy()
-        dx = data_source["dx"][start_c:end_c].copy()
-
-        data_source.clear_data()
-        
-        vx = data_source["velocity_x"][start_c:end_c].copy()
-        vy = data_source["velocity_y"][start_c:end_c].copy()
-        vz = data_source["velocity_z"][start_c:end_c].copy()
-    
-        if isinstance(self.Zmet, basestring):
-            metalZ = data_source[self.Zmet][start_c:end_c].to_ndarray()
-        else:
-            metalZ = self.Zmet*np.ones(EM.shape)
-        
-        data_source.clear_data()
-
-        idxs = np.argsort(kT)
-        dshape = idxs.shape
-
-        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
-        dkT = kT_bins[1]-kT_bins[0]
-        kT_idxs = np.digitize(kT[idxs], kT_bins)
-        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
-        bcounts = np.bincount(kT_idxs).astype("int")
-        bcounts = bcounts[bcounts > 0]
-        n = int(0)
-        bcell = []
-        ecell = []
-        for bcount in bcounts:
-            bcell.append(n)
-            ecell.append(n+bcount)
-            n += bcount
-        kT_idxs = np.unique(kT_idxs)
-        
         self.spectral_model.prepare()
         energy = self.spectral_model.ebins
-    
-        cell_em = EM[idxs]*vol_scale
-    
-        number_of_photons = np.zeros(dshape, dtype='uint64')
-        energies = []
-    
-        u = np.random.random(cell_em.shape)
+
+        citer = data_source.chunks(["kT","cell_volume","density",
+                                    "x","y","z","dx","velocity_x",
+                                    "velocity_y","velocity_z"], "io")
+
+        photons = {}
+        photons["x"] = []
+        photons["y"] = []
+        photons["z"] = []
+        photons["vx"] = []
+        photons["vy"] = []
+        photons["vz"] = []
+        photons["dx"] = []
+        photons["Energy"] = []
+        photons["NumberOfPhotons"] = []
+
+        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+
+        for chunk in parallel_objects(citer):
+
+            kT = chunk["kT"].v
+            num_cells = len(kT)
+            if num_cells == 0:
+                continue
+            vol = chunk["cell_volume"].in_cgs().v
+            EM = (chunk["density"]/mp).v**2
+            EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+
+            if isinstance(self.Zmet, basestring):
+                metalZ = chunk[self.Zmet].v
+            else:
+                metalZ = self.Zmet
+
+            idxs = np.argsort(kT)
+
+            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            dkT = kT_bins[1]-kT_bins[0]
+            kT_idxs = np.digitize(kT[idxs], kT_bins)
+            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
+            bcounts = np.bincount(kT_idxs).astype("int")
+            bcounts = bcounts[bcounts > 0]
+            n = int(0)
+            bcell = []
+            ecell = []
+            for bcount in bcounts:
+                bcell.append(n)
+                ecell.append(n+bcount)
+                n += bcount
+            kT_idxs = np.unique(kT_idxs)
+
+            cell_em = EM[idxs]*vol_scale
+
+            u = np.random.random(cell_em.shape)
+
+            number_of_photons = np.zeros(num_cells)
+            energies = []
+
+            pbar = get_pbar("Generating photons for chunk ", num_cells)
+
+            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
+
+                kT = kT_bins[ikT] + 0.5*dkT
         
-        pbar = get_pbar("Generating Photons", dshape[0])
+                em_sum_c = cell_em[ibegin:iend].sum()
+                if isinstance(self.Zmet, basestring):
+                    em_sum_m = (metalZ*cell_em)[ibegin:iend].sum()
+                else:
+                    em_sum_m = metalZ*em_sum_c
 
-        for i, ikT in enumerate(kT_idxs):
+                cspec, mspec = self.spectral_model.get_spectrum(kT)
 
-            ibegin = bcell[i]
-            iend = ecell[i]
-            kT = kT_bins[ikT] + 0.5*dkT
-        
-            em_sum_c = cell_em[ibegin:iend].sum()
-            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+                cumspec_c = np.cumsum(cspec.d)
+                counts_c = cumspec_c[:]/cumspec_c[-1]
+                counts_c = np.insert(counts_c, 0, 0.0)
+                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
 
-            cspec, mspec = self.spectral_model.get_spectrum(kT)
-            cspec *= dist_fac*em_sum_c/vol_scale
-            mspec *= dist_fac*em_sum_m/vol_scale
+                cumspec_m = np.cumsum(mspec.d)
+                counts_m = cumspec_m[:]/cumspec_m[-1]
+                counts_m = np.insert(counts_m, 0, 0.0)
+                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
 
-            cumspec_c = np.cumsum(cspec.ndarray_view())
-            counts_c = cumspec_c[:]/cumspec_c[-1]
-            counts_c = np.insert(counts_c, 0, 0.0)
-            tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
+                v = u[ibegin:iend]
 
-            cumspec_m = np.cumsum(mspec.ndarray_view())
-            counts_m = cumspec_m[:]/cumspec_m[-1]
-            counts_m = np.insert(counts_m, 0, 0.0)
-            tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
-
-            for icell in xrange(ibegin, iend):
+                cell_norm_c = tot_ph_c*cell_em[ibegin:iend]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= v)
             
-                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
-            
-                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+                if isinstance(self.Zmet, basestring):
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cell_em[ibegin:iend]/em_sum_m
+                else:
+                    cell_norm_m = tot_ph_m*metalZ*cell_em[ibegin:iend]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= v)
             
                 cell_n = cell_n_c + cell_n_m
 
-                if cell_n > 0:
-                    number_of_photons[icell] = cell_n
-                    randvec_c = np.random.uniform(size=cell_n_c)
-                    randvec_c.sort()
-                    randvec_m = np.random.uniform(size=cell_n_m)
-                    randvec_m.sort()
-                    cell_e_c = np.interp(randvec_c, counts_c, energy)
-                    cell_e_m = np.interp(randvec_m, counts_m, energy)
-                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
-                
-                pbar.update(icell)
+                number_of_photons[ibegin:iend] = cell_n
 
-        pbar.finish()
+                for cn, cn_c, cn_m in zip(cell_n, cell_n_c, cell_n_m):
+                    if cn > 0:
+                        randvec_c = np.random.uniform(size=cn_c)
+                        randvec_c.sort()
+                        randvec_m = np.random.uniform(size=cn_m)
+                        randvec_m.sort()
+                        cell_e_c = np.interp(randvec_c, counts_c, energy)
+                        cell_e_m = np.interp(randvec_m, counts_m, energy)
+                        energies.append(np.concatenate([cell_e_c,cell_e_m]))
             
-        active_cells = number_of_photons > 0
-        idxs = idxs[active_cells]
-        
-        photons = {}
+                pbar.update(iend)
 
-        src_ctr = parameters["center"]
-        
-        photons["x"] = (x[idxs]-src_ctr[0]).in_units("kpc")
-        photons["y"] = (y[idxs]-src_ctr[1]).in_units("kpc")
-        photons["z"] = (z[idxs]-src_ctr[2]).in_units("kpc")
-        photons["vx"] = vx[idxs].in_units("km/s")
-        photons["vy"] = vy[idxs].in_units("km/s")
-        photons["vz"] = vz[idxs].in_units("km/s")
-        photons["dx"] = dx[idxs].in_units("kpc")
-        photons["NumberOfPhotons"] = number_of_photons[active_cells]
-        photons["Energy"] = np.concatenate(energies)*units.keV
-    
+            pbar.finish()
+
+            active_cells = number_of_photons > 0
+            idxs = idxs[active_cells]
+
+            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
+            photons["Energy"].append(ds.arr(np.concatenate(energies), "keV"))
+            photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
+            photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
+            photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
+            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
+            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
+            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
+            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
+
+        for key in photons:
+            photons[key] = np.concatenate(photons[key])
+
         return photons

diff -r 177b386b405a3a2c7f7ff417da560f5837414bb6 -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -28,8 +28,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
-from yt import units
-from yt.units.yt_array import YTQuantity
+from yt.units.yt_array import YTQuantity, YTArray
 import h5py
 from yt.utilities.on_demand_imports import _astropy
 pyfits = _astropy.pyfits
@@ -93,12 +92,12 @@
         
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = f["/fid_exp_time"].value*units.s
-        parameters["FiducialArea"] = f["/fid_area"].value*units.cm*units.cm
+        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
         parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = f["/fid_d_a"].value*units.Mpc
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
         parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = f["/width"].value*units.kpc
+        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
         parameters["HubbleConstant"] = f["/hubble"].value
         parameters["OmegaMatter"] = f["/omega_matter"].value
         parameters["OmegaLambda"] = f["/omega_lambda"].value
@@ -107,13 +106,13 @@
         start_c = comm.rank*num_cells/comm.size
         end_c = (comm.rank+1)*num_cells/comm.size
         
-        photons["x"] = f["/x"][start_c:end_c]*units.kpc
-        photons["y"] = f["/y"][start_c:end_c]*units.kpc
-        photons["z"] = f["/z"][start_c:end_c]*units.kpc
-        photons["dx"] = f["/dx"][start_c:end_c]*units.kpc
-        photons["vx"] = f["/vx"][start_c:end_c]*units.km/units.s
-        photons["vy"] = f["/vy"][start_c:end_c]*units.km/units.s
-        photons["vz"] = f["/vz"][start_c:end_c]*units.km/units.s
+        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
 
         n_ph = f["/num_photons"][:]
         
@@ -128,7 +127,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
         
-        photons["Energy"] = f["/energy"][start_e:end_e]*units.keV
+        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
         
         f.close()
 
@@ -296,12 +295,12 @@
         dimension = 0
         width = 0.0
         for i, ax in enumerate("xyz"):
-            pos = data_source[ax]
-            delta = data_source["d%s"%(ax)]
-            le = np.min(pos-0.5*delta)
-            re = np.max(pos+0.5*delta)
+            le, re = data_source.quantities.extrema(ax)
+            delta_min, delta_max = data_source.quantities.extrema("d%s"%ax)
+            le -= 0.5*delta_max
+            re += 0.5*delta_max
             width = max(width, re-parameters["center"][i], parameters["center"][i]-le)
-            dimension = max(dimension, int(width/delta.min()))
+            dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
                 
@@ -332,15 +331,15 @@
                 num_photons = sum(sizes_p)        
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
-                x = np.zeros((num_cells))
-                y = np.zeros((num_cells))
-                z = np.zeros((num_cells))
-                vx = np.zeros((num_cells))
-                vy = np.zeros((num_cells))
-                vz = np.zeros((num_cells))
-                dx = np.zeros((num_cells))
-                n_ph = np.zeros((num_cells), dtype="uint64")
-                e = np.zeros((num_photons))
+                x = np.zeros(num_cells)
+                y = np.zeros(num_cells)
+                z = np.zeros(num_cells)
+                vx = np.zeros(num_cells)
+                vy = np.zeros(num_cells)
+                vz = np.zeros(num_cells)
+                dx = np.zeros(num_cells)
+                n_ph = np.zeros(num_cells, dtype="uint64")
+                e = np.zeros(num_photons)
             else:
                 sizes_c = []
                 sizes_p = []
@@ -377,15 +376,15 @@
 
         else:
 
-            x = self.photons["x"].ndarray_view()
-            y = self.photons["y"].ndarray_view()
-            z = self.photons["z"].ndarray_view()
-            vx = self.photons["vx"].ndarray_view()
-            vy = self.photons["vy"].ndarray_view()
-            vz = self.photons["vz"].ndarray_view()
-            dx = self.photons["dx"].ndarray_view()
+            x = self.photons["x"].d
+            y = self.photons["y"].d
+            z = self.photons["z"].d
+            vx = self.photons["vx"].d
+            vy = self.photons["vy"].d
+            vz = self.photons["vz"].d
+            dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
-            e = self.photons["Energy"].ndarray_view()
+            e = self.photons["Energy"].d
                                                 
         if comm.rank == 0:
             
@@ -472,7 +471,7 @@
         else:
             sky_center = YTArray(sky_center, "degree")
 
-        dx = self.photons["dx"].ndarray_view()
+        dx = self.photons["dx"].d
         nx = self.parameters["Dimension"]
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
@@ -611,7 +610,7 @@
         events = {}
 
         dx_min = self.parameters["Width"].value/self.parameters["Dimension"]
-        dtheta = np.rad2deg(dx_min/D_A.value)*units.degree
+        dtheta = YTQuantity(np.rad2deg(dx_min/D_A.value), "degree")
         
         events["xpix"] = xsky[detected]/dx_min + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min + 0.5*(nx+1)
@@ -749,8 +748,8 @@
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2                                                
         x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
-        self.events["xsky"] = x*units.degree
-        self.events["ysky"] = y*units.degree
+        self.events["xsky"] = YTArray(x, "degree")
+        self.events["ysky"] = YTArray(y, "degree")
 
     def keys(self):
         return self.events.keys()
@@ -780,10 +779,10 @@
         
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = f["/exp_time"].value*units.s
-        parameters["Area"] = f["/area"].value*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = f["/d_a"].value*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
             parameters["RMF"] = f["/rmf"].value
         if "arf" in f:
@@ -799,13 +798,13 @@
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
-        events["eobs"] = f["/eobs"][:]*units.keV
+        events["eobs"] = YTArray(f["/eobs"][:], "keV")
         if "pi" in f:
             events["PI"] = f["/pi"][:]
         if "pha" in f:
             events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = f["/sky_center"][:]*units.deg
-        parameters["dtheta"] = f["/dtheta"].value*units.deg
+        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
         parameters["pix_center"] = f["/pix_center"][:]
         
         f.close()
@@ -824,10 +823,10 @@
         events = {}
         parameters = {}
         
-        parameters["ExposureTime"] = tblhdu.header["EXPOSURE"]*units.s
-        parameters["Area"] = tblhdu.header["AREA"]*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
+        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
-        parameters["AngularDiameterDistance"] = tblhdu.header["D_A"]*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:
             parameters["RMF"] = tblhdu["RMF"]
         if "ARF" in tblhdu.header:
@@ -840,12 +839,12 @@
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
             parameters["Instrument"] = tblhdu["INSTRUME"]
-        parameters["sky_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])*units.deg
+        parameters["sky_center"] = YTArray([tblhdu["TCRVL2"],tblhdu["TCRVL3"]], "deg")
         parameters["pix_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])
-        parameters["dtheta"] = tblhdu["TCRVL3"]*units.deg
+        parameters["dtheta"] = YTQuantity(tblhdu["TCRVL3"], "deg")
         events["xpix"] = tblhdu.data.field("X")
         events["ypix"] = tblhdu.data.field("Y")
-        events["eobs"] = (tblhdu.data.field("ENERGY")/1000.)*units.keV # Convert to keV
+        events["eobs"] = YTArray(tblhdu.data.field("ENERGY")/1000., "keV")
         if "PI" in tblhdu.columns.names:
             events["PI"] = tblhdu.data.field("PI")
         if "PHA" in tblhdu.columns.names:

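For readers less familiar with yt's unit machinery: multiplying a float or NumPy array by a symbol from yt.units and passing a units string to the YTQuantity/YTArray constructors both produce equivalent unitful objects, so the replacements above are essentially a change of spelling. A minimal sketch, independent of the HDF5 files read here:

    import numpy as np
    import yt.units as units
    from yt.units.yt_array import YTArray, YTQuantity

    exp_time = YTQuantity(500.0, "s")      # explicit constructor
    same_exp_time = 500.0 * units.s        # multiplication by a unit symbol
    assert exp_time == same_exp_time

    vels = YTArray(np.linspace(100.0, 200.0, 5), "km/s")
    print(vels.in_units("cm/s"))
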
diff -r 177b386b405a3a2c7f7ff417da560f5837414bb6 -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -14,7 +14,7 @@
 import numpy as np
 import os
 from yt.funcs import *
-from yt import units
+from yt.units.yt_array import YTQuantity
 import h5py
 
 try:
@@ -30,15 +30,14 @@
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
 hc = (hcgs*clight).in_units("keV*angstrom")
-cm3 = units.cm*units.cm*units.cm
 
 class SpectralModel(object):
 
     def __init__(self, emin, emax, nchan):
-        self.emin = emin*units.keV
-        self.emax = emax*units.keV
+        self.emin = YTQuantity(emin, "keV")
+        self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
+        self.ebins = np.linspace(emin, emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -100,7 +99,7 @@
             metal_spec = np.zeros((self.nchan))
         else:
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
-        return cosmic_spec*cm3/units.s, metal_spec*cm3/units.s
+        return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -275,7 +274,7 @@
         mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
-            return cspec_l*cm3/units.s, mspec_l*cm3/units.s
+            return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s")
         dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
@@ -285,8 +284,8 @@
         for elem in self.metal_elem:
             mspec_l += self._make_spectrum(elem, tindex+2)
             mspec_r += self._make_spectrum(elem, tindex+3)
-        cosmic_spec = (cspec_l*(1.-dT)+cspec_r*dT)*cm3/units.s
-        metal_spec = (mspec_l*(1.-dT)+mspec_r*dT)*cm3/units.s
+        cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
+        metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
         return cosmic_spec, metal_spec
 
 class TableAbsorbModel(SpectralModel):
@@ -313,11 +312,11 @@
         f = h5py.File(self.filename,"r")
         emin = f["energy"][:].min()
         emax = f["energy"][:].max()
-        self.sigma = f["cross_section"][:]*units.cm*units.cm
+        self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.nH = nH*1.0e22/(units.cm*units.cm)
+        self.nH = YTQuantity(nH*1.0e22, "cm**-2")
         
     def prepare(self):
         """

diff -r 177b386b405a3a2c7f7ff417da560f5837414bb6 -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -94,6 +94,7 @@
     "arcmin": (np.pi/10800., dimensions.angle), # arcminutes
     "arcsec": (np.pi/648000., dimensions.angle), # arcseconds
     "mas": (np.pi/648000000., dimensions.angle), # millarcseconds
+    "hourangle": (np.pi/12., dimensions.angle), # hour angle
     "steradian": (1.0, dimensions.solid_angle),
 
     # misc

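With the new table entry, an hour angle converts to the other angular units as expected, since pi/12 radians is 15 degrees. A quick check, assuming a yt build that includes this entry:

    from yt.units.yt_array import YTQuantity

    ha = YTQuantity(1.0, "hourangle")
    print(ha.in_units("degree"))   # 15.0 degree
    print(ha.in_units("arcmin"))   # 900.0 arcmin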

https://bitbucket.org/yt_analysis/yt/commits/bc58b91ab4f6/
Changeset:   bc58b91ab4f6
Branch:      yt
User:        jzuhone
Date:        2014-11-10 19:45:22+00:00
Summary:     Small refinements to allow for using offsets along other directions in the future
Affected #:  2 files

diff -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 -r bc58b91ab4f633d9f95db6f0d46990c7ff04197f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -301,7 +301,6 @@
                    self.dataset.domain_left_edge)/self.dataset.domain_dimensions
 
         if self.dataset.nprocs > 1:
-            float_size = np.dtype(">f4").itemsize
             gle_all = []
             gre_all = []
             shapes_all = []
@@ -325,8 +324,7 @@
                 shapes_all += shapes
                 levels_all += [levels[i]]*self.dataset.nprocs
                 new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
-                file_offsets += [[slc[0], slc[1], slc[2].start*shp[0]*shp[1]*float_size]
-                                 for slc, shp in zip(slices, shapes)]
+                file_offsets += [[slc[0].start, slc[1].start, slc[2].start] for slc in slices]
             self.num_grids *= self.dataset.nprocs
             self.grids = np.empty(self.num_grids, dtype='object')
             self.grid_filenames = new_gridfilenames

diff -r a4a09439c520ed991bdcf1256c15d8f9d80c0d21 -r bc58b91ab4f633d9f95db6f0d46990c7ff04197f yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -17,6 +17,8 @@
 import numpy as np
 from yt.funcs import mylog, defaultdict
 
+float_size = np.dtype(">f4").itemsize
+
 class IOHandlerAthena(BaseIOHandler):
     _dataset_type = "athena"
     _offset_string = 'data:offsets=0'
@@ -40,8 +42,8 @@
                 continue
             f = open(grid.filename, "rb")
             data[grid.id] = {}
-            grid_ncells = np.prod(grid.ActiveDimensions)
             grid_dims = grid.ActiveDimensions
+            grid_ncells = np.prod(grid_dims)
             grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
             read_table_offset = get_read_table_offset(f)
             for field in fields:
@@ -50,7 +52,8 @@
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                f.seek(read_table_offset+offset+grid.file_offset[2])
+                file_offset = grid.file_offset[2]*grid_dims[0]*grid_dims[1]*float_size
+                f.seek(read_table_offset+offset+file_offset)
                 if dtype == 'scalar':
                     v = np.fromfile(f, dtype='>f4',
                                     count=grid_ncells).reshape(grid_dims,order='F')

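The file_offset computation above turns a starting z-index into a byte offset: in a Fortran-ordered (nx, ny, nz) block of big-endian float32 values, the plane at index k begins nx*ny*k values into the block. A small sketch of that arithmetic with made-up dimensions:

    import numpy as np

    float_size = np.dtype(">f4").itemsize  # 4 bytes per value

    def z_plane_byte_offset(z_start, nx, ny):
        # Fortran ('F') ordering stores whole x-y planes contiguously, so the
        # plane at index z_start begins nx*ny*z_start values into the block.
        return z_start * nx * ny * float_size

    print(z_plane_byte_offset(16, 64, 64))  # 262144 bytes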

https://bitbucket.org/yt_analysis/yt/commits/af384aa4e7b1/
Changeset:   af384aa4e7b1
Branch:      yt
User:        jzuhone
Date:        2014-11-10 19:50:09+00:00
Summary:     doc fix
Affected #:  1 file

diff -r bc58b91ab4f633d9f95db6f0d46990c7ff04197f -r af384aa4e7b11937574abf9c936707952cf47e15 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -67,7 +67,7 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("density_squared", function=_density_squared)
+    ds.add_field("density_squared", function=_density_squared)
 
 Then we'll project this field along the z-axis.
 

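A self-contained version of the corrected snippet, for reference; the dataset path and the units string are illustrative rather than taken from the docs:

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")  # any supported dataset

    def _density_squared(field, data):
        return data["density"]**2

    # density is in g/cm**3, so its square carries g**2/cm**6
    ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")

    prj = ds.proj("density_squared", "z")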

https://bitbucket.org/yt_analysis/yt/commits/1064ee6d2010/
Changeset:   1064ee6d2010
Branch:      yt
User:        jzuhone
Date:        2014-11-10 21:16:59+00:00
Summary:     Bugfix
Affected #:  1 file

diff -r bc58b91ab4f633d9f95db6f0d46990c7ff04197f -r 1064ee6d2010a1ba6678fef33906411de5726099 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,7 +27,7 @@
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_objects
-from IPython import embed
+from  yt.units.yt_array import uconcatenate
 
 n_kT = 10000
 kT_min = 8.08e-2
@@ -206,6 +206,6 @@
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
         for key in photons:
-            photons[key] = np.concatenate(photons[key])
+            photons[key] = uconcatenate(photons[key])
 
         return photons

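The switch to uconcatenate matters because np.concatenate operates on the underlying ndarrays and can silently drop the attached units, while uconcatenate checks that the units agree and carries them through. A minimal illustration, independent of the photon machinery:

    from yt.units.yt_array import YTArray, uconcatenate

    a = YTArray([1.0, 2.0], "kpc")
    b = YTArray([3.0, 4.0], "kpc")

    c = uconcatenate([a, b])
    print(c)        # [ 1.  2.  3.  4.] kpc
    print(c.units)  # kpc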

https://bitbucket.org/yt_analysis/yt/commits/b0f568d41e63/
Changeset:   b0f568d41e63
Branch:      yt
User:        jzuhone
Date:        2014-11-10 21:18:50+00:00
Summary:     Adding informational message
Affected #:  1 file

diff -r af384aa4e7b11937574abf9c936707952cf47e15 -r b0f568d41e6306a622fbb0d0cac69e1eb7d34bc5 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -305,7 +305,9 @@
         parameters["Width"] = 2.*width.in_units("kpc")
                 
         photons = photon_model(data_source, parameters)
-        
+
+        mylog.info("Finished generating photons.")
+
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
                         


https://bitbucket.org/yt_analysis/yt/commits/49a2756d3dbd/
Changeset:   49a2756d3dbd
Branch:      yt
User:        jzuhone
Date:        2014-11-10 21:19:05+00:00
Summary:     Merge
Affected #:  1 file

diff -r b0f568d41e6306a622fbb0d0cac69e1eb7d34bc5 -r 49a2756d3dbdd91533d994de76b4616c95ed6ac0 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,7 +27,7 @@
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_objects
-from IPython import embed
+from  yt.units.yt_array import uconcatenate
 
 n_kT = 10000
 kT_min = 8.08e-2
@@ -206,6 +206,6 @@
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
         for key in photons:
-            photons[key] = np.concatenate(photons[key])
+            photons[key] = uconcatenate(photons[key])
 
         return photons


https://bitbucket.org/yt_analysis/yt/commits/78161d4aa22a/
Changeset:   78161d4aa22a
Branch:      yt
User:        jzuhone
Date:        2014-11-10 21:50:32+00:00
Summary:     Overriding chunking for Athena datasets.
Affected #:  2 files

diff -r 49a2756d3dbdd91533d994de76b4616c95ed6ac0 -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -28,6 +28,8 @@
     mpc_conversion, sec_conversion
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
@@ -391,6 +393,14 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
 class AthenaDataset(Dataset):
     _index_class = AthenaHierarchy
     _field_info_class = AthenaFieldInfo

diff -r 49a2756d3dbdd91533d994de76b4616c95ed6ac0 -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -35,7 +35,6 @@
 
     def _read_chunk_data(self,chunk,fields):
         data = {}
-        grids_by_file = defaultdict(list)
         if len(chunk.objs) == 0: return data
         for grid in chunk.objs:
             if grid.filename is None:


https://bitbucket.org/yt_analysis/yt/commits/981847305d8f/
Changeset:   981847305d8f
Branch:      yt
User:        jzuhone
Date:        2014-11-10 23:01:18+00:00
Summary:     Fully 3D decomposition into virtual grids. Yahoo!
Affected #:  4 files

diff -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb -r 981847305d8fd9ca464e9d57824f418f330e7050 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -141,12 +141,11 @@
 
    ds = yt.load("sloshing.0000.vtk", nprocs=8)
 
-which will subdivide each original grid into `nprocs` grids along the z-axis.
+which will subdivide each original grid into `nprocs` grids.
 
 .. note::
 
-    Virtual grids are only supported for 3D data, and each original grid's dimensions along the z-axis must
-    be an integer multiple of `nprocs`.
+    Virtual grids are only supported (and really only necessary) for 3D data.
 
 Alternative values for the following simulation parameters may be specified using a ``parameters``
 dict, accepting the following keys:

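One way to see the effect of the decomposition described in the doc hunk above, using the same sample file name as the docs:

    import yt

    ds = yt.load("sloshing.0000.vtk", nprocs=8)
    # Each on-disk grid is now split into 8 virtual grids, so the index
    # holds eight times as many grids as the original dump.
    print(ds.index.num_grids)
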
diff -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb -r 981847305d8fd9ca464e9d57824f418f330e7050 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -34,7 +34,7 @@
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
 from yt.utilities.decompose import \
-    decompose_array
+    decompose_array, get_psize
 
 def _get_convert(fname):
     def _conv(data):
@@ -43,7 +43,8 @@
 
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index, level, start, dimensions, file_offset):
+    def __init__(self, id, index, level, start, dimensions,
+                 file_offset, read_dims):
         df = index.dataset.filename[4:-4]
         gname = index.grid_filenames[id]
         AMRGridPatch.__init__(self, id, filename = gname,
@@ -56,6 +57,7 @@
         self.stop_index = self.start_index + dimensions
         self.ActiveDimensions = dimensions.copy()
         self.file_offset = file_offset
+        self.read_dims = read_dims
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -309,17 +311,15 @@
             levels_all = []
             new_gridfilenames = []
             file_offsets = []
+            read_dims = []
             for i in range(levels.shape[0]):
-                if gdims[i][2] % self.ds.nprocs != 0:
-                    raise RuntimeError("Grid %04d cannot be split into virtual grids " % i +
-                                       "since it is not an integer multiple of nprocs!")
                 dx = dx_root/self.dataset.refine_by**(levels[i])
                 gle_orig = self.ds.arr(np.round(dle + dx*glis[i], decimals=12),
                                        "code_length")
                 gre_orig = self.ds.arr(np.round(gle_orig + dx*gdims[i], decimals=12),
                                        "code_length")
                 bbox = np.array([[le,re] for le, re in zip(gle_orig, gre_orig)])
-                psize = np.array([1,1,self.ds.nprocs])
+                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
                 gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
                 gle_all += gle
                 gre_all += gre
@@ -327,6 +327,7 @@
                 levels_all += [levels[i]]*self.dataset.nprocs
                 new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
                 file_offsets += [[slc[0].start, slc[1].start, slc[2].start] for slc in slices]
+                read_dims += [np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int") for shape in shapes]
             self.num_grids *= self.dataset.nprocs
             self.grids = np.empty(self.num_grids, dtype='object')
             self.grid_filenames = new_gridfilenames
@@ -339,12 +340,13 @@
             for i in range(self.num_grids):
                 self.grids[i] = self.grid(i,self,levels_all[i],
                                           glis[i], shapes_all[i],
-                                          file_offsets[i])
+                                          file_offsets[i], read_dims[i])
         else:
             self.grids = np.empty(self.num_grids, dtype='object')
             for i in range(levels.shape[0]):
                 self.grids[i] = self.grid(i,self,levels[i],
-                                          glis[i], gdims[i], [0]*3)
+                                          glis[i], gdims[i], [0]*3,
+                                          gdims[i])
                 dx = dx_root/self.dataset.refine_by**(levels[i])
                 dxs.append(dx)
 

diff -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb -r 981847305d8fd9ca464e9d57824f418f330e7050 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -18,6 +18,7 @@
 from yt.funcs import mylog, defaultdict
 
 float_size = np.dtype(">f4").itemsize
+axis_list = ["_x","_y","_z"]
 
 class IOHandlerAthena(BaseIOHandler):
     _dataset_type = "athena"
@@ -42,8 +43,9 @@
             f = open(grid.filename, "rb")
             data[grid.id] = {}
             grid_dims = grid.ActiveDimensions
-            grid_ncells = np.prod(grid_dims)
-            grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
+            read_dims = grid.read_dims
+            grid_ncells = np.int(np.prod(read_dims))
+            grid0_ncells = np.int(np.prod(grid.index.grids[0].read_dims))
             read_table_offset = get_read_table_offset(f)
             for field in fields:
                 dtype, offsetr = grid.index._field_map[field]
@@ -51,23 +53,23 @@
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                file_offset = grid.file_offset[2]*grid_dims[0]*grid_dims[1]*float_size
+                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size
+                xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
+                yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
                 f.seek(read_table_offset+offset+file_offset)
                 if dtype == 'scalar':
+                    f.seek(read_table_offset+offset+file_offset)
                     v = np.fromfile(f, dtype='>f4',
-                                    count=grid_ncells).reshape(grid_dims,order='F')
+                                    count=grid_ncells).reshape(read_dims,order='F')
                 if dtype == 'vector':
+                    vec_offset = axis_list.index(field[-1][-2:])
+                    f.seek(read_table_offset+offset+3*file_offset)
                     v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
-                if '_x' in field[-1]:
-                    v = v[0::3].reshape(grid_dims,order='F')
-                elif '_y' in field[-1]:
-                    v = v[1::3].reshape(grid_dims,order='F')
-                elif '_z' in field[-1]:
-                    v = v[2::3].reshape(grid_dims,order='F')
+                    v = v[vec_offset::3].reshape(read_dims,order='F')
                 if grid.ds.field_ordering == 1:
-                    data[grid.id][field] = v.T.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].T.astype("float64")
                 else:
-                    data[grid.id][field] = v.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].astype("float64")
             f.close()
         return data
     

diff -r 78161d4aa22a0120e7f016512b3b48bf8a6142eb -r 981847305d8fd9ca464e9d57824f418f330e7050 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -78,6 +78,10 @@
     prj2 = ds1.proj("density",0)
 
     yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_allclose, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    for ax in "xyz":
+        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
+    yield assert_allclose, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
     yield assert_equal, prj1["density"], prj2["density"]
 
     ytcfg["yt","skip_dataset_cache"] = "False"


https://bitbucket.org/yt_analysis/yt/commits/1bb41ad944be/
Changeset:   1bb41ad944be
Branch:      yt
User:        jzuhone
Date:        2014-11-06 01:50:53+00:00
Summary:     Merging
Affected #:  89 files

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -96,7 +96,7 @@
 
 **Point** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTPointBase`    
-    | Usage: ``point(coord, ds=None, field_parameters=None)``
+    | Usage: ``point(coord, ds=None, field_parameters=None, data_source=None)``
     | A point defined by a single cell at specified coordinates.
 
 1D Objects
@@ -104,14 +104,14 @@
 
 **Ray (Axis-Aligned)** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase`
-    | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None)``
+    | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) stretching through the full domain 
       aligned with one of the x,y,z axes.  Defined by an axis and a point
       to be intersected.
 
 **Ray (Arbitrarily-Aligned)** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRayBase`
-    | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None)``
+    | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) defined by arbitrary start and end coordinates. 
 
 2D Objects
@@ -119,13 +119,13 @@
 
 **Slice (Axis-Aligned)** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTSliceBase`
-    | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None)``
+    | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to one of the axes and intersecting a particular 
       coordinate.
 
 **Slice (Arbitrarily-Aligned)** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase`
-    | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None)``
+    | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
@@ -141,8 +141,8 @@
 
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegionBase`
-    | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
-    | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
+    | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
+    | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A box-like region aligned with the grid axis orientation.  It is 
       defined by a left_edge, a right_edge, and a center.  The left_edge
       and right_edge are the minimum and maximum bounds in the three axes
@@ -152,14 +152,14 @@
 
 **Disk/Cylinder** 
     | Class: :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
-    | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None)``
+    | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A cylinder defined by a point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
 **Ellipsoid** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoidBase`
-    | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None)``
+    | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None, data_source=None)``
     | An ellipsoid with axis magnitudes set by semi_major_axis_length, 
      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector 
      sets the direction of the semi_major_axis.  tilt defines the orientation 
@@ -167,7 +167,7 @@
 
 **Sphere** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTSphereBase`
-    | Usage: ``sphere(center, radius, ds=None, field_parameters=None)``
+    | Usage: ``sphere(center, radius, ds=None, field_parameters=None, data_source=None)``
     | A sphere defined by a central coordinate and a radius.
 
 
@@ -176,6 +176,12 @@
 
 See also the section on :ref:`filtering-data`.
 
+**Intersecting Regions**
+    | Most Region objects provide a data_source parameter, which allows you to subselect
+    | one region from another (in the coordinate system of the Dataset). Note that this can
+    | easily lead to empty data for non-intersecting regions.
+    | Usage: ``slice(axis, coord, ds, data_source=sph)``
+
 **Boolean Regions** 
     | **Note: not yet implemented in yt 3.0**
     | Usage: ``boolean()``

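A concrete version of the intersecting-regions usage line above, building a sphere and then slicing only within it; the dataset is the IsolatedGalaxy sample used elsewhere in these docs, but any dataset would do:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sph = ds.sphere("c", (10.0, "kpc"))

    # The slice only returns cells that also fall inside the sphere; a
    # data_source that does not intersect the slicing plane yields empty data.
    slc = ds.slice("z", 0.5, data_source=sph)
    print(slc["density"].size)
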
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
+  "signature": "sha256:c7cfb2db456d127bb633b7eee7ad6fe14290aa622ac62694c7840d80137afaba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -36,7 +36,7 @@
      "input": [
       "import yt\n",
       "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
-      "          \n",
+      "\n",
       "dd = ds.all_data()\n",
       "maxval, maxloc = ds.find_max('density')\n",
       "\n",
@@ -222,6 +222,69 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Electrostatic/Electromagnetic Units"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Electromagnetic units can be a bit tricky, because the units for such quantities in different unit systems can have entirely different dimensions, even if they are meant to represent the same physical quantities. For example, in the SI system of units, current in Amperes is a fundamental unit of measure, so the unit of charge \"coulomb\" is equal to one ampere-second. On the other hand, in the Gaussian/CGS system, there is no equivalent base electromagnetic unit, and the electrostatic charge unit \"esu\" is equal to one $\\mathrm{cm^{3/2}g^{-1/2}s^{-1}}$ (which does not have any apparent physical significance). `yt` recognizes this difference:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "q1 = yt.YTArray(1.0,\"C\") # Coulombs\n",
+      "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
+      "\n",
+      "print \"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions\n",
+      "print \"units =\", q2.in_cgs().units, \", dims =\", q2.units.dimensions"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Under the hood, the `yt` units system has a translation layer that converts between these two systems, without any further effort required. For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import elementary_charge\n",
+      "\n",
+      "print elementary_charge\n",
+      "elementary_charge_C = elementary_charge.in_units(\"C\")\n",
+      "print elementary_charge_C"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The electromagnetic unit translations `yt` understands are:\n",
+      "\n",
+      "* Charge: 1 coulomb (C) $\\leftrightarrow$ 0.1c electrostatic unit (esu, Fr)\n",
+      "* Current: 1 ampere (A, C/s) $\\leftrightarrow$ 0.1c statampere (statA, esu/s, Fr) \n",
+      "* Magnetic Field: 1 tesla (T) $\\leftrightarrow 10^4$ gauss (G)\n",
+      "\n",
+      "where \"Fr\" is the franklin, an alternative name for the electrostatic unit, and c is the speed of light. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Working with views and converting to ndarray"
      ]
     },
@@ -324,10 +387,9 @@
      "collapsed": false,
      "input": [
       "from astropy import units as u\n",
-      "from yt import YTQuantity, YTArray\n",
       "\n",
       "x = 42.0 * u.meter\n",
-      "y = YTQuantity.from_astropy(x) "
+      "y = yt.YTQuantity.from_astropy(x) "
      ],
      "language": "python",
      "metadata": {},
@@ -349,7 +411,7 @@
      "collapsed": false,
      "input": [
       "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray.from_astropy(a)"
+      "b = yt.YTArray.from_astropy(a)"
      ],
      "language": "python",
      "metadata": {},
@@ -436,7 +498,7 @@
      "collapsed": false,
      "input": [
       "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity.from_astropy(kb)\n",
+      "k2 = yt.YTQuantity.from_astropy(kb)\n",
       "print k1 == k2"
      ],
      "language": "python",
@@ -447,7 +509,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "c = YTArray.from_astropy(a)\n",
+      "c = yt.YTArray.from_astropy(a)\n",
       "d = c.to_astropy()\n",
       "print a == d"
      ],

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8ba193cc3867e2185133bbf3952bd5834e6c63993208635c71cf55fa6f27b491"
+  "signature": "sha256:67eb4b2a3d1017bac09209ebc939e8c1fe154660fa15f76862019dfc8652ec32"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -305,9 +305,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Overriding Code Unit Definitions"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n",
+      "print ds1.length_unit\n",
+      "print ds1.mass_unit\n",
+      "print ds1.time_unit\n",
+      "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp1[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                  \"time_unit\":(1.0,\"Myr\"),\n",
+      "                  \"mass_unit\":(1.0e14,\"Msun\")}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`units_override` can take the following keys:\n",
+      "\n",
+      "* `length_unit`\n",
+      "* `time_unit`\n",
+      "* `mass_unit`\n",
+      "* `magnetic_unit`\n",
+      "* `temperature_unit`\n",
+      "\n",
+      "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
+      "print ds2.length_unit\n",
+      "print ds2.mass_unit\n",
+      "print ds2.time_unit\n",
+      "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n",
+      "print sp2[\"density\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. "
+     ]
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -0,0 +1,179 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:cee652d703dd3369d81ebc670882d3734f73d0274aab98823a784d8039355480"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
+      "\n",
+      "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
+      "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
+      "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
+      "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
+      "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
+      "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n",
+      "\n",
+      "The following unit equivalencies only apply under conditions applicable for an ideal gas with a constant mean molecular weight $\\mu$ and ratio of specific heats $\\gamma$:\n",
+      "\n",
+      "* `\"number_density\"`: conversions between density and number density ($n = \\rho/\\mu{m_p}$)\n",
+      "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
+      "\n",
+      "A `YTArray` or `YTQuantity` can be converted to an equivalent using `to_equivalent`, where the unit and the equivalence name are provided as arguments:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "\n",
+      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
+      "\n",
+      "dd = ds.all_data()\n",
+      "\n",
+      "print dd[\"temperature\"].to_equivalent(\"erg\", \"thermal\")\n",
+      "print dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\")\n",
+      "\n",
+      "# Rest energy of the proton\n",
+      "from yt.utilities.physical_constants import mp\n",
+      "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
+      "print E_p"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import clight\n",
+      "v = 0.1*clight\n",
+      "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
+      "print g\n",
+      "print g.to_equivalent(\"c\", \"lorentz\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Special Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some equivalencies can take supplemental information. The `\"number_density\"` equivalence can take a custom mean molecular weight (default is $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"density\"].max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\").max()\n",
+      "print dd[\"density\"].to_equivalent(\"cm**-3\", \"number_density\", mu=0.75).max()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `\"sound_speed\"` equivalence optionally takes the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$ (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\").mean()\n",
+      "print dd[\"temperature\"].to_equivalent(\"km/s\", \"sound_speed\", gamma=4./3., mu=0.5).mean()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "These options must be used with caution, and only if you know the underlying data adheres to these assumptions!"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Determining Valid Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
+      "\n",
+      "try:\n",
+      "    x = v.to_equivalent(\"angstrom\", \"spectral\")\n",
+      "except YTInvalidUnitEquivalence as e:\n",
+      "    print e"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "E_p.list_equivalencies()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -33,6 +33,7 @@
    comoving_units_and_code_units
    comparing_units_from_different_datasets
    units_and_plotting
+   unit_equivalencies
 
 .. note::
 

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/analyzing/units/unit_equivalencies.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_equivalencies.rst
@@ -0,0 +1,7 @@
+.. _symbolic_units:
+
+Symbolic units: :code:`yt.units`
+================================
+
+.. notebook:: 6)_Unit_Equivalencies.ipynb
+   :skip_exceptions:

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -113,29 +113,56 @@
 
 yt works in cgs ("Gaussian") units by default, but Athena data is not
 normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and mass to ``load``:
+cgs units, you may supply conversions for length, time, and mass to ``load`` using
+the ``units_override`` functionality:
 
 .. code-block:: python
 
    import yt
-   ds = yt.load("id0/cluster_merger.0250.vtk",
-                parameters={"length_unit":(1.0,"Mpc"),
-                            "time_unit"(1.0,"Myr"),
-                            "mass_unit":(1.0e14,"Msun")})
+
+   units_override = {"length_unit":(1.0,"Mpc"),
+                     "time_unit":(1.0,"Myr"),
+                     "mass_unit":(1.0e14,"Msun")}
+
+   ds = yt.load("id0/cluster_merger.0250.vtk", units_override=units_override)
 
 This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
 ``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Alternative values for the following simulation parameters may be specified using a ``parameters``
+dict, accepting the following keys:
+
+* ``Gamma``: ratio of specific heats, Type: Float
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values corresponding to each dimension
+
+.. code-block:: python
+
+   import yt
+
+   parameters = {"gamma":4./3., "geometry":"cylindrical", "periodicity":(False,False,False)}
+
+   ds = yt.load("relativistic_jet_0000.vtk", parameters=parameters)
+
 .. rubric:: Caveats
 
 * yt primarily works with primitive variables. If the Athena
   dataset contains conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
+* Special relativistic datasets may be loaded, but are not fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is currently assumed that their relationship
+  is that of an ideal :math:`\gamma`-law equation of state.
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 
+.. note::
+
+   The old behavior of supplying unit conversions using a ``parameters``
+   dict supplied to ``load`` for Athena datasets is still supported, but is being deprecated in
+   favor of ``units_override``, which provides the same functionality.
+
 .. _loading-orion-data:
 
 BoxLib Data

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -40,14 +40,15 @@
 
 .. code-block:: python
 
-   from yt.config import ytcfg
-   ytcfg["yt", "loglevel"] = "1"
+   import yt
+   yt.funcs.mylog.setLevel(1)
 
-   from yt.mods import *
-   ds = load("my_data0001")
+   ds = yt.load("my_data0001")
    ds.print_stats()
 
-This has the same effect as setting ``loglevel = 1`` in the configuration file.
+This has the same effect as setting ``loglevel = 1`` in the configuration
+file. Note that a log level of 1 means that all log messages are printed to
+stdout.  To disable logging, set the log level to 50.
 
 Setting Configuration On the Command Line
 -----------------------------------------
@@ -95,3 +96,5 @@
   quiet.
 * ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr
+* ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
+  is turned off.
\ No newline at end of file

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 doc/source/reference/faq/index.rst
--- a/doc/source/reference/faq/index.rst
+++ b/doc/source/reference/faq/index.rst
@@ -214,26 +214,37 @@
 
 The plugin file is a means of modifying the available fields, quantities, data
 objects and so on without modifying the source code of yt.  The plugin file
-will be executed if it is detected, and it must be:
+will be executed if it is detected.  It must be located in a ``.yt`` folder
+in your home directory and be named ``my_plugins.py``:
 
 .. code-block:: bash
 
    $HOME/.yt/my_plugins.py
 
-The code in this file can thus add fields, add derived quantities, add
+The code in this file can add fields, define functions, define
 datatypes, and on and on.  It is executed at the bottom of ``yt.mods``, and so
-it is provided with the entire namespace available in the module ``yt.mods`` --
-which is the primary entry point to yt, and which contains most of the
-functionality of yt.  For example, if I created a plugin file containing:
+it is provided with the entire namespace available in the module ``yt.mods``.
+For example, if I created a plugin file containing:
 
 .. code-block:: python
 
    def _myfunc(field, data):
        return np.random.random(data["density"].shape)
-   add_field("SomeQuantity", function=_myfunc)
+   add_field("some_quantity", function=_myfunc, units='')
 
-then all of my data objects would have access to the field "SomeQuantity"
-despite its lack of use.
+then all of my data objects would have access to the field "some_quantity".
+Note that the units must be specified as a string, see
+:ref:`data_selection_and_fields` for more details on units and derived fields.
+
+.. note::
+
+   Since the ``my_plugins.py`` is parsed inside of ``yt.mods``, you must import
+   yt using ``yt.mods`` to use the plugins file.  If you import using
+   ``import yt``, the plugins file will not be parsed.  You can tell that your
+   plugins file is being parsed by watching for a logging message when you
+   import yt.  Note that both the ``yt load`` and ``iyt`` command line entry
+   points invoke ``from yt.mods import *``, so the ``my_plugins.py`` file
+   will be parsed if you enter yt that way.
 
 You can also define other convenience functions in your plugin file.  For
 instance, you could define some variables or functions, and even import common

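Putting the FAQ's pieces together, a complete plugin file might look like the sketch below; the field name and empty units string follow the FAQ's example, everything else is illustrative:

    # $HOME/.yt/my_plugins.py -- executed at the bottom of yt.mods, so names
    # like add_field are already in scope.
    import numpy as np

    def _myfunc(field, data):
        # a dimensionless random field with the same shape as "density"
        return np.random.random(data["density"].shape)

    add_field("some_quantity", function=_myfunc, units="")
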
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -98,7 +98,12 @@
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity, \
+    uconcatenate, \
+    uintersect1d, \
+    uunion1d
 
 from yt.fields.api import \
     field_plugins, \

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -598,7 +598,7 @@
         # 1. From saved_fields, e.g. we've already got it.
         # 2. From the halo binary files off disk.
         # 3. Use the unique particle indexes of the halo to select a missing
-        # field from an AMR Sphere.
+        # field from a Sphere.
         if key in self._saved_fields:
             # We've already got it.
             return self._saved_fields[key]
@@ -675,7 +675,7 @@
         
         Returns
         -------
-        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
         
         Examples
@@ -754,7 +754,7 @@
         # 1. From saved_fields, e.g. we've already got it.
         # 2. From the halo h5 files off disk.
         # 3. Use the unique particle indexes of the halo to select a missing
-        # field from an AMR Sphere.
+        # field from a Sphere.
         if key in self._saved_fields:
             # We've already got it.
             return self._saved_fields[key]
@@ -868,7 +868,7 @@
         
         Returns
         -------
-        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
         
         Examples
@@ -1670,7 +1670,7 @@
     ----------
     ds : `Dataset`
         The dataset on which halo finding will be conducted.
-    subvolume : `yt.data_objects.api.AMRData`, optional
+    subvolume : `yt.data_objects.data_containers.YTSelectionContainer`, optional
         A region over which HOP will be run, which can be used to run HOP
         on a subvolume of the full volume. Default = None, which defaults
         to the full volume automatically.
@@ -1772,7 +1772,7 @@
     ----------
     ds : `Dataset`
         The dataset on which halo finding will be conducted.
-    subvolume : `yt.data_objects.api.AMRData`, optional
+    subvolume : `yt.data_objects.data_containers.YTSelectionContainer`, optional
         A region over which HOP will be run, which can be used to run HOP
         on a subvolume of the full volume. Default = None, which defaults
         to the full volume automatically.

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -150,7 +150,7 @@
         Parameters
         ----------
 
-        data_source : `yt.data_objects.api.AMRData`
+        data_source : `yt.data_objects.data_containers.YTSelectionContainer`
             The data source from which the photons will be generated.
         redshift : float
             The cosmological redshift for the photons.

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -39,7 +39,7 @@
         self.emax = emax*units.keV
         self.nchan = nchan
         self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
-        self.de = np.diff(self.ebins)*units.keV
+        self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
     def prepare(self):

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -121,7 +121,7 @@
             The width of the projection.
         nx : integer, optional
             The dimensions on a side of the projection image.
-        source : yt.data_objects.api.AMRData, optional
+        source : yt.data_objects.data_containers.YTSelectionContainer, optional
             If specified, this will be the data source used for selecting regions to project.
 
         Examples
@@ -183,7 +183,7 @@
             The width of the projection.
         nx : integer, optional
             The dimensions on a side of the projection image.
-        source : yt.data_objects.api.AMRData, optional
+        source : yt.data_objects.data_containers.YTSelectionContainer, optional
             If specified, this will be the data source used for selecting regions to project.
             Currently unsupported in yt 2.x.
 

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -39,6 +39,7 @@
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoreddatasets = '500',
+    skip_dataset_cache = 'False',
     loadfieldplugins = 'True',
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',
@@ -97,6 +98,8 @@
 class YTConfigParser(ConfigParser.ConfigParser):
     def __setitem__(self, key, val):
         self.set(key[0], key[1], val)
+    def __getitem__(self, key):
+        return self.get(key[0], key[1])
 
 if os.path.exists(os.path.expanduser("~/.yt/config")):
     ytcfg = YTConfigParser(ytcfg_defaults)

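A short usage sketch of the new option and the dictionary-style access added above; the section and option names come from the diff, the rest is illustrative.

    from yt.config import ytcfg

    # skip the module-level dataset cache so repeated loads re-read the file
    ytcfg["yt", "skip_dataset_cache"] = "True"            # routed through __setitem__ -> set()
    skip = ytcfg.getboolean("yt", "skip_dataset_cache")   # as queried in static_output.py
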
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -42,7 +42,7 @@
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects, parallel_root_only, ParallelAnalysisInterface
+    parallel_objects, parallel_root_only 
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
@@ -183,7 +183,7 @@
     center : array_like, optional
         The 'center' supplied to fields that use it.  Note that this does
         not have to have `coord` as one value.  Strictly optional.
-    data_source : `yt.data_objects.api.AMRData`, optional
+    data_source : `yt.data_objects.data_containers.YTSelectionContainer`, optional
         If specified, this will be the data source used for selecting
         regions to project.
     method : string, optional
@@ -833,7 +833,7 @@
             new_fields.append(output_field)
         level_state.fields = new_fields
 
-class YTSurfaceBase(YTSelectionContainer3D, ParallelAnalysisInterface):
+class YTSurfaceBase(YTSelectionContainer3D):
     r"""This surface object identifies isocontours on a cell-by-cell basis,
     with no consideration of global connectedness, and returns the vertices
     of the Triangles in that isocontour.
@@ -850,7 +850,7 @@
     
     Parameters
     ----------
-    data_source : AMR3DDataObject
+    data_source : YTSelectionContainer
         This is the object which will used as a source
     surface_field : string
         Any field that can be obtained in a data object.  This is the field
@@ -886,7 +886,6 @@
                          ("index", "z"))
     vertices = None
     def __init__(self, data_source, surface_field, field_value):
-        ParallelAnalysisInterface.__init__(self)
         self.data_source = data_source
         self.surface_field = surface_field
         self.field_value = field_value

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,6 +41,8 @@
 from yt.fields.derived_field import \
     ValidateSpatial
 import yt.geometry.selection_routines
+from yt.geometry.selection_routines import \
+    compose_selector
 from yt.extern.six import add_metaclass
 
 def force_array(item, shape):
@@ -101,8 +103,15 @@
         sets its initial set of fields, and the remainder of the arguments
         are passed as field_parameters.
         """
-        if ds != None:
+        # ds is typically set in the new object type created in Dataset._add_object_class
+        # but it can also be passed as a parameter to the constructor, in which case it will 
+        # override the default. This code ensures it is never not set.
+        if ds is not None:
             self.ds = ds
+        else:
+            if not hasattr(self, "ds"):
+                raise RuntimeError("Error: ds must be set either through class type or parameter to the constructor")
+
         self._current_particle_type = "all"
         self._current_fluid_type = self.ds.default_fluid_type
         self.ds.objects.append(weakref.proxy(self))
@@ -542,10 +551,22 @@
     _sort_by = None
     _selector = None
     _current_chunk = None
+    _data_source = None
+    _dimensionality = None
 
-    def __init__(self, *args, **kwargs):
-        super(YTSelectionContainer, self).__init__(*args, **kwargs)
-
+    def __init__(self, ds, field_parameters, data_source=None):
+        ParallelAnalysisInterface.__init__(self)
+        super(YTSelectionContainer, self).__init__(ds, field_parameters)
+        self._data_source = data_source
+        if data_source is not None:
+            if data_source.ds is not self.ds:
+                raise RuntimeError("Attempted to construct a DataContainer with a data_source from a different DataSet", ds, data_source.ds)
+            else:
+                print "DataSets: ", self.ds, data_source.ds
+            if data_source._dimensionality < self._dimensionality:
+                raise RuntimeError("Attempted to construct a DataContainer with a data_source of lower dimensionality (%u vs %u)" %
+                                    (data_source._dimensionality, self._dimensionality))
+ 
     @property
     def selector(self):
         if self._selector is not None: return self._selector
@@ -555,7 +576,11 @@
                          "%s_selector" % self._type_name, None)
         if sclass is None:
             raise YTDataSelectorNotImplemented(self._type_name)
-        self._selector = sclass(self)
+
+        if self._data_source is not None:
+            self._selector = compose_selector(self, self._data_source.selector, sclass(self))
+        else:
+            self._selector = sclass(self)
         return self._selector
 
     def chunks(self, fields, chunking_style, **kwargs):
@@ -765,30 +790,32 @@
 
 class YTSelectionContainer0D(YTSelectionContainer):
     _spatial = False
-    def __init__(self, ds, field_parameters):
+    _dimensionality = 0
+    def __init__(self, ds, field_parameters = None, data_source = None):
         super(YTSelectionContainer0D, self).__init__(
-            ds, field_parameters)
+            ds, field_parameters, data_source)
 
 class YTSelectionContainer1D(YTSelectionContainer):
     _spatial = False
-    def __init__(self, ds, field_parameters):
+    _dimensionality = 1
+    def __init__(self, ds, field_parameters = None, data_source = None):
         super(YTSelectionContainer1D, self).__init__(
-            ds, field_parameters)
+            ds, field_parameters, data_source)
         self._grids = None
         self._sortkey = None
         self._sorted = {}
 
 class YTSelectionContainer2D(YTSelectionContainer):
     _key_fields = ['px','py','pdx','pdy']
+    _dimensionality = 2
     """
     Prepares the YTSelectionContainer2D, normal to *axis*.  If *axis* is 4, we are not
     aligned with any axis.
     """
     _spatial = False
-    def __init__(self, axis, ds, field_parameters):
-        ParallelAnalysisInterface.__init__(self)
+    def __init__(self, axis, ds, field_parameters = None, data_source = None):
         super(YTSelectionContainer2D, self).__init__(
-            ds, field_parameters)
+            ds, field_parameters, data_source)
         # We need the ds, which will exist by now, for fix_axis.
         self.axis = fix_axis(axis, self.ds)
         self.set_field_parameter("axis", axis)
@@ -910,9 +937,9 @@
     _key_fields = ['x','y','z','dx','dy','dz']
     _spatial = False
     _num_ghost_zones = 0
-    def __init__(self, center, ds = None, field_parameters = None):
-        ParallelAnalysisInterface.__init__(self)
-        super(YTSelectionContainer3D, self).__init__(ds, field_parameters)
+    _dimensionality = 3
+    def __init__(self, center, ds, field_parameters = None, data_source = None):
+        super(YTSelectionContainer3D, self).__init__(ds, field_parameters, data_source)
         self._set_center(center)
         self.coords = None
         self._grids = None
@@ -1273,9 +1300,9 @@
     """
     _type_name = "boolean"
     _con_args = ("regions",)
-    def __init__(self, regions, fields = None, ds = None, **kwargs):
+    def __init__(self, regions, fields = None, ds = None, field_parameters = None, data_source = None):
         # Center is meaningless, but we'll define it all the same.
-        YTSelectionContainer3D.__init__(self, [0.5]*3, fields, ds, **kwargs)
+        YTSelectionContainer3D.__init__(self, [0.5]*3, fields, ds, field_parameters, data_source)
         self.regions = regions
         self._all_regions = []
         self._some_overlap = []

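A brief sketch of how the composed selector introduced above is meant to be exercised; it mirrors the test_compose.py tests later in this changeset, and the geometry values are illustrative.

    from yt.testing import fake_random_ds

    ds = fake_random_ds(64)
    sp = ds.sphere([0.25]*3, 0.15)
    # the region only returns cells that are also selected by the sphere
    reg = ds.region([0.25]*3, [0.1]*3, [0.4]*3, data_source=sp)
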
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -761,6 +761,7 @@
         self.field_data = YTFieldData()
         if weight_field is not None:
             self.variance = YTFieldData()
+            weight_field = self.data_source._determine_fields(weight_field)[0]
         self.weight_field = weight_field
         self.field_units = {}
         ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
@@ -774,7 +775,7 @@
             A list of fields to create profile histograms for
         
         """
-        fields = ensure_list(fields)
+        fields = self.data_source._determine_fields(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
         cfields = fields + list(self.bin_fields)
         citer = self.data_source.chunks(cfields, "io")
@@ -907,9 +908,11 @@
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            arr[:,i] = chunk[field][filter]
+            units = chunk.ds.field_info[field].units
+            arr[:,i] = chunk[field][filter].in_units(units)
         if self.weight_field is not None:
-            weight_data = chunk[self.weight_field]
+            units = chunk.ds.field_info[self.weight_field].units
+            weight_data = chunk[self.weight_field].in_units(units)
         else:
             weight_data = np.ones(filter.size, dtype="float64")
         weight_data = weight_data[filter]
@@ -1230,6 +1233,16 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
+
+def sanitize_field_tuple_keys(input_dict, data_source):
+    if input_dict is not None:
+        dummy = {}
+        for item in input_dict:
+            dummy[data_source._determine_fields(item)[0]] = input_dict[item]
+        return dummy
+    else:
+        return input_dict
+
 def create_profile(data_source, bin_fields, fields, n_bins=64,
                    extrema=None, logs=None, units=None,
                    weight_field="cell_mass",
@@ -1242,7 +1255,7 @@
 
     Parameters
     ----------
-    data_source : AMR3DData Object
+    data_source : YTSelectionContainer Object
         The data object to be profiled.
     bin_fields : list of strings
         List of the binning fields for profiling.
@@ -1293,7 +1306,7 @@
     >>> print profile["gas", "temperature"]
 
     """
-    bin_fields = ensure_list(bin_fields)
+    bin_fields = data_source._determine_fields(bin_fields)
     fields = ensure_list(fields)
     if len(bin_fields) == 1:
         cls = Profile1D
@@ -1305,16 +1318,9 @@
         raise NotImplementedError
     bin_fields = data_source._determine_fields(bin_fields)
     fields = data_source._determine_fields(fields)
-    if units is not None:
-        dummy = {}
-        for item in units:
-            dummy[data_source._determine_fields(item)[0]] = units[item]
-        units.update(dummy)
-    if extrema is not None:
-        dummy = {}
-        for item in extrema:
-            dummy[data_source._determine_fields(item)[0]] = extrema[item]
-        extrema.update(dummy)
+    units = sanitize_field_tuple_keys(units, data_source)
+    extrema = sanitize_field_tuple_keys(extrema, data_source)
+    logs = sanitize_field_tuple_keys(logs, data_source)
     if weight_field is not None:
         weight_field, = data_source._determine_fields([weight_field])
     if not iterable(n_bins):
@@ -1322,18 +1328,21 @@
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
     if logs is None:
-        logs = [data_source.ds._get_field_info(f[0],f[1]).take_log
-                for f in bin_fields]
-    else:
-        logs = [logs[bin_field[-1]] for bin_field in bin_fields]
+        logs = {}
+    logs_list = []
+    for bin_field in bin_fields:
+        if bin_field in logs:
+            logs_list.append(logs[bin_field])
+        else:
+            logs_list.append(data_source.ds.field_info[bin_field].take_log)
+    logs = logs_list
     if extrema is None:
         ex = [data_source.quantities["Extrema"](f, non_zero=l)
               for f, l in zip(bin_fields, logs)]
     else:
         ex = []
         for bin_field in bin_fields:
-            bf_units = data_source.ds._get_field_info(
-                bin_field[0], bin_field[1]).units
+            bf_units = data_source.ds.field_info[bin_field].units
             try:
                 field_ex = list(extrema[bin_field[-1]])
             except KeyError:

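A sketch of what the sanitized keyword handling above allows: plain-string and ("ftype", "fname") keys can now be mixed in the extrema/logs/units dictionaries. The field names follow the tests below; the extrema values are purely illustrative.

    from yt.testing import fake_random_ds
    from yt.data_objects.profiles import create_profile

    ds = fake_random_ds(64)
    dd = ds.all_data()
    prof = create_profile(dd, ("gas", "density"), [("gas", "temperature")],
                          weight_field=None,
                          extrema={"density": (1e-30, 1e-24)},    # plain-string key
                          logs={("gas", "density"): True})        # tuple key
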
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -51,8 +51,11 @@
     ds: Dataset, optional
         An optional dataset to use rather than self.ds
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived
-         fields.
+        A dictionary of field parameters than can be accessed by derived
+        fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Examples
     --------
@@ -64,8 +67,8 @@
     """
     _type_name = "point"
     _con_args = ('p',)
-    def __init__(self, p, ds = None, field_parameters = None):
-        super(YTPointBase, self).__init__(ds, field_parameters)
+    def __init__(self, p, ds=None, field_parameters=None, data_source=None):
+        super(YTPointBase, self).__init__(ds, field_parameters, data_source)
         self.p = p
 
 class YTOrthoRayBase(YTSelectionContainer1D):
@@ -92,6 +95,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Examples
     --------
@@ -104,8 +110,9 @@
     _key_fields = ['x','y','z','dx','dy','dz']
     _type_name = "ortho_ray"
     _con_args = ('axis', 'coords')
-    def __init__(self, axis, coords, ds=None, field_parameters=None):
-        super(YTOrthoRayBase, self).__init__(ds, field_parameters)
+    def __init__(self, axis, coords, ds=None, 
+                 field_parameters=None, data_source=None):
+        super(YTOrthoRayBase, self).__init__(ds, field_parameters, data_source)
         self.axis = axis
         xax = self.ds.coordinates.x_axis[self.axis]
         yax = self.ds.coordinates.y_axis[self.axis]
@@ -144,6 +151,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Examples
     --------
@@ -156,8 +166,9 @@
     _type_name = "ray"
     _con_args = ('start_point', 'end_point')
     _container_fields = ("t", "dts")
-    def __init__(self, start_point, end_point, ds=None, field_parameters=None):
-        super(YTRayBase, self).__init__(ds, field_parameters)
+    def __init__(self, start_point, end_point, ds=None,
+                 field_parameters=None, data_source=None):
+        super(YTRayBase, self).__init__(ds, field_parameters, data_source)
         self.start_point = self.ds.arr(start_point,
                             'code_length', dtype='float64')
         self.end_point = self.ds.arr(end_point,
@@ -204,6 +215,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Examples
     --------
@@ -217,10 +231,10 @@
     _type_name = "slice"
     _con_args = ('axis', 'coord')
     _container_fields = ("px", "py", "pdx", "pdy")
-
     def __init__(self, axis, coord, center=None, ds=None,
-                 field_parameters = None):
-        YTSelectionContainer2D.__init__(self, axis, ds, field_parameters)
+                 field_parameters=None, data_source=None):
+        YTSelectionContainer2D.__init__(self, axis, ds,
+                                        field_parameters, data_source)
         self._set_center(center)
         self.coord = coord
 
@@ -285,6 +299,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Notes
     -----
@@ -308,10 +325,10 @@
     _type_name = "cutting"
     _con_args = ('normal', 'center')
     _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
-
-    def __init__(self, normal, center, north_vector = None, 
-                 ds = None, field_parameters = None):
-        YTSelectionContainer2D.__init__(self, 4, ds, field_parameters)
+    def __init__(self, normal, center, north_vector=None,
+                 ds=None, field_parameters=None, data_source=None):
+        YTSelectionContainer2D.__init__(self, 4, ds,
+                                        field_parameters, data_source)
         self._set_center(center)
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
@@ -465,7 +482,7 @@
 
     Parameters
     ----------
-    center : array_like 
+    center : array_like
         coordinate to which the normal, radius, and height all reference
     normal : array_like
         the normal vector defining the direction of lengthwise part of the 
@@ -482,6 +499,9 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
+    data_source: optional
+        Draw the selection from the provided data source rather than
+        all data associated with the data_set
 
     Examples
     --------
@@ -494,8 +514,9 @@
     _type_name = "disk"
     _con_args = ('center', '_norm_vec', 'radius', 'height')
     def __init__(self, center, normal, radius, height, fields=None,
-                 ds=None, **kwargs):
-        YTSelectionContainer3D.__init__(self, center, fields, ds, **kwargs)
+                 ds=None, field_parameters=None, data_source=None):
+        YTSelectionContainer3D.__init__(self, center, ds,
+                                        field_parameters, data_source)
         self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
         self.set_field_parameter("normal", self._norm_vec)
         self.set_field_parameter("center", self.center)
@@ -523,9 +544,10 @@
     """
     _type_name = "region"
     _con_args = ('center', 'left_edge', 'right_edge')
-    def __init__(self, center, left_edge, right_edge, fields = None,
-                 ds = None, **kwargs):
-        YTSelectionContainer3D.__init__(self, center, ds, **kwargs)
+    def __init__(self, center, left_edge, right_edge, fields=None,
+                 ds=None, field_parameters=None, data_source=None):
+        YTSelectionContainer3D.__init__(self, center, ds,
+                                        field_parameters, data_source)
         if not isinstance(left_edge, YTArray):
             self.left_edge = self.ds.arr(left_edge, 'code_length')
         else:
@@ -542,8 +564,10 @@
     """
     _type_name = "data_collection"
     _con_args = ("_obj_list",)
-    def __init__(self, obj_list, ds=None, field_parameters=None, center=None):
-        YTSelectionContainer3D.__init__(self, center, ds, field_parameters)
+    def __init__(self, obj_list, ds=None, field_parameters=None,
+                 data_source=None, center=None):
+        YTSelectionContainer3D.__init__(self, center, ds,
+                                        field_parameters, data_source)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
                                 dtype="int64")
         self._obj_list = obj_list
@@ -569,8 +593,10 @@
     """
     _type_name = "sphere"
     _con_args = ('center', 'radius')
-    def __init__(self, center, radius, ds = None, field_parameters = None):
-        super(YTSphereBase, self).__init__(center, ds, field_parameters)
+    def __init__(self, center, radius, ds=None,
+                 field_parameters=None, data_source=None):
+        super(YTSphereBase, self).__init__(center, ds,
+                                           field_parameters, data_source)
         # Unpack the radius, if necessary
         radius = fix_length(radius, self.ds)
         if radius < self.index.get_smallest_dx():
@@ -615,8 +641,9 @@
     _type_name = "ellipsoid"
     _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')
     def __init__(self, center, A, B, C, e0, tilt, fields=None,
-                 ds=None, field_parameters = None):
-        YTSelectionContainer3D.__init__(self, center, ds, field_parameters)
+                 ds=None, field_parameters=None, data_source=None):
+        YTSelectionContainer3D.__init__(self, center, ds,
+                                        field_parameters, data_source)
         # make sure the magnitudes of semi-major axes are in order
         if A<B or B<C:
             raise YTEllipsoidOrdering(ds, A, B, C)
@@ -625,10 +652,10 @@
         self._B = self.ds.quan(B, 'code_length')
         self._C = self.ds.quan(C, 'code_length')
         if self._C < self.index.get_smallest_dx():
-            raise YTSphereTooSmall(ds, self._C, self.index.get_smallest_dx())
+            raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx())
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+ 
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -684,9 +711,10 @@
     """
     _type_name = "cut_region"
     _con_args = ("base_object", "conditionals")
-    def __init__(self, base_object, conditionals, ds = None,
-                 field_parameters = None):
-        super(YTCutRegionBase, self).__init__(base_object.center, ds, field_parameters)
+    def __init__(self, base_object, conditionals, ds=None,
+                 field_parameters=None, data_source=None):
+        super(YTCutRegionBase, self).__init__(base_object.center, ds,
+                                              field_parameters, data_source)
         self.conditionals = ensure_list(conditionals)
         self.base_object = base_object
         self._selector = None
@@ -762,4 +790,3 @@
     @property
     def fwidth(self):
         return self.base_object.fwidth[self._cond_ind,:]
-

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -134,7 +134,9 @@
             return obj
         apath = os.path.abspath(filename)
         #if not os.path.exists(apath): raise IOError(filename)
-        if apath not in _cached_datasets:
+        if ytcfg.getboolean("yt","skip_dataset_cache"):
+            obj = object.__new__(cls)
+        elif apath not in _cached_datasets:
             obj = object.__new__(cls)
             if obj._skip_cache is False:
                 _cached_datasets[apath] = obj
@@ -142,7 +144,7 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -157,6 +159,9 @@
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}
+        if units_override is None:
+            units_override = {}
+        self.units_override = units_override
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -667,6 +672,8 @@
 
     def set_code_units(self):
         self._set_code_unit_attributes()
+        # here we override units, if overrides have been provided.
+        self._override_code_units()
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -679,6 +686,24 @@
             self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
                                    DW.units.dimensions)
 
+    def _override_code_units(self):
+        if len(self.units_override) == 0:
+            return
+        mylog.warning("Overriding code units. This is an experimental and potentially "+
+                      "dangerous option that may yield inconsistent results, and must be used "+
+                      "very carefully, and only if you know what you want from it.")
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
+                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
+            val = self.units_override.get("%s_unit" % unit, None)
+            if val is not None:
+                if isinstance(val, YTQuantity):
+                    val = (val.v, str(val.units))
+                elif not isinstance(val, tuple):
+                    val = (val, cgs)
+                u = getattr(self, "%s_unit" % unit)
+                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
+                setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
+
     _arr = None
     @property
     def arr(self):

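A hedged sketch of the ``units_override`` keyword wired in above. The filename is a placeholder; per ``_override_code_units`` the values may be plain numbers, (value, unit) tuples, or YTQuantity instances, and the numbers here are illustrative.

    import yt

    ds = yt.load("snapshot_0000",    # placeholder filename
                 units_override={"length_unit": (1.0, "Mpc"),
                                 "time_unit": (1.0, "Myr"),
                                 "mass_unit": (1.0e14, "Msun")})
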
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/tests/test_compose.py
--- /dev/null
+++ b/yt/data_objects/tests/test_compose.py
@@ -0,0 +1,146 @@
+from yt.testing import *
+from yt.fields.local_fields import add_field
+from yt.units.yt_array import YTArray, uintersect1d
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+# Copied from test_boolean for computing a unique identifier for
+# each cell from cell positions
+def _IDFIELD(field, data):
+    width = data.ds.domain_right_edge - data.ds.domain_left_edge
+    min_dx = YTArray(1.0/8192, input_units='code_length',
+                     registry=data.ds.unit_registry)
+    delta = width / min_dx
+    x = data['x'] - min_dx / 2.
+    y = data['y'] - min_dx / 2.
+    z = data['z'] - min_dx / 2.
+    xi = x / min_dx
+    yi = y / min_dx
+    zi = z / min_dx
+    index = xi + delta[0] * (yi + delta[1] * zi)
+    index = index.astype('int64')
+    return index
+
+def test_compose_no_overlap():
+    r"""Test to make sure that composed data objects that don't
+    overlap behave the way we expect (return empty collections)
+    """
+    empty = np.array([])
+    for n in [1, 2, 4, 8]:
+        ds = fake_random_ds(64, nprocs=n)
+        ds.add_field("ID", function=_IDFIELD)
+
+        # position parameters for initial region
+        center = [0.25]*3
+        left_edge = [0.1]*3
+        right_edge = [0.4]*3
+        normal = [1, 0, 0]
+        radius = height = 0.15
+
+        # initial 3D regions
+        sources = [ds.sphere(center, radius),
+                   ds.region(center, left_edge, right_edge),
+                   ds.disk(center, normal, radius, height)]
+
+        # position parameters for non-overlapping regions
+        center = [0.75]*3
+        left_edge = [0.6]*3
+        right_edge = [0.9]*3
+
+        # subselect non-overlapping 0, 1, 2, 3D regions
+        for data1 in sources:
+            data2 = ds.sphere(center, radius, data_source=data1)
+            yield assert_array_equal, data2['ID'], empty
+
+            data2 = ds.region(center, left_edge, right_edge, data_source=data1)
+            yield assert_array_equal, data2['ID'], empty  
+
+            data2 = ds.disk(center, normal, radius, height, data_source=data1)
+            yield assert_array_equal, data2['ID'], empty
+
+            for d in range(3):
+                data2 = ds.slice(d, center[d], data_source=data1)
+                yield assert_array_equal, data2['ID'], empty
+
+            for d in range(3):
+                data2 = ds.ortho_ray(d, center[0:d] + center[d+1:], data_source=data1)
+                yield assert_array_equal, data2['ID'], empty
+
+            data2 = ds.point(center, data_source=data1)
+            yield assert_array_equal, data2['ID'], empty
+
+def test_compose_overlap():
+    r"""Test to make sure that composed data objects that do
+    overlap behave the way we expect 
+    """
+    empty = np.array([])
+    for n in [1, 2, 4, 8]:
+        ds = fake_random_ds(64, nprocs=n)
+        ds.add_field("ID", function=_IDFIELD)
+
+        # position parameters for initial region
+        center = [0.4, 0.5, 0.5]
+        left_edge = [0.1]*3
+        right_edge = [0.7]*3
+        normal = [1, 0, 0]
+        radius = height = 0.15
+
+        # initial 3D regions
+        sources = [ds.sphere(center, radius),
+                   ds.region(center, left_edge, right_edge),
+                   ds.disk(center, normal, radius, height)]
+
+        # position parameters for overlapping regions
+        center = [0.6, 0.5, 0.5]
+        left_edge = [0.3]*3
+        right_edge = [0.9]*3
+
+        # subselect non-overlapping 0, 1, 2, 3D regions
+        for data1 in sources:
+            id1 = data1['ID']
+
+            data2 = ds.sphere(center, radius)
+            data3 = ds.sphere(center, radius, data_source=data1)
+            id2 = data2['ID']
+            id3 = data3['ID']
+            id3.sort()
+            yield assert_array_equal, uintersect1d(id1, id2), id3
+
+            data2 = ds.region(center, left_edge, right_edge)
+            data3 = ds.region(center, left_edge, right_edge, data_source=data1)
+            id2 = data2['ID']
+            id3 = data3['ID']
+            id3.sort()
+            yield assert_array_equal, uintersect1d(id1, id2), id3
+
+            data2 = ds.disk(center, normal, radius, height)
+            data3 = ds.disk(center, normal, radius, height, data_source=data1)
+            id2 = data2['ID']
+            id3 = data3['ID']
+            id3.sort()
+            yield assert_array_equal, uintersect1d(id1, id2), id3
+
+            for d in range(3):
+                data2 = ds.slice(d, center[d])
+                data3 = ds.slice(d, center[d], data_source=data1)
+                id2 = data2['ID']
+                id3 = data3['ID']
+                id3.sort()
+                yield assert_array_equal, uintersect1d(id1, id2), id3
+
+            for d in range(3):
+                data2 = ds.ortho_ray(d, center[0:d] + center[d+1:])
+                data3 = ds.ortho_ray(d, center[0:d] + center[d+1:], data_source=data1)
+                id2 = data2['ID']
+                id3 = data3['ID']
+                id3.sort()
+                yield assert_array_equal, uintersect1d(id1, id2), id3
+
+            data2 = ds.point(center)
+            data3 = ds.point(center, data_source=data1)
+            id2 = data2['ID']
+            id3 = data3['ID']
+            id3.sort()
+            yield assert_array_equal, uintersect1d(id1, id2), id3

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,7 +1,7 @@
 from yt.testing import *
 from yt.data_objects.profiles import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    Profile1D, Profile2D, Profile3D
+    Profile1D, Profile2D, Profile3D, create_profile
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
@@ -87,13 +87,26 @@
     for nb in [8, 16, 32, 64]:
         # We log all the fields or don't log 'em all.  No need to do them
         # individually.
-        for lf in [True, False]: 
-            p1d = Profile1D(dd, 
-                "density",     nb, rmi*e1, rma*e2, lf,
-                weight_field = None)
-            p1d.add_fields(["ones", "temperature"])
-            yield assert_equal, p1d["ones"].sum(), nv
-            yield assert_rel_equal, tt, p1d["temperature"].sum(), 7
+        for lf in [True, False]:
+            direct_profile = Profile1D(
+                dd, "density", nb, rmi*e1, rma*e2, lf, weight_field = None)
+            direct_profile.add_fields(["ones", "temperature"])
+
+            indirect_profile_s = create_profile(
+                dd, "density", ["ones", "temperature"], n_bins=nb,
+                extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf}, 
+                weight_field=None)
+
+            indirect_profile_t = create_profile(
+                dd, ("gas", "density"),
+                [("index", "ones"), ("gas", "temperature")], n_bins=nb,
+                extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf}, 
+                weight_field=None)
+
+            for p1d in [direct_profile, indirect_profile_s,
+                        indirect_profile_t]:
+                yield assert_equal, p1d["index", "ones"].sum(), nv
+                yield assert_rel_equal, tt, p1d["gas", "temperature"].sum(), 7
 
             p2d = Profile2D(dd, 
                 "density",     nb, rmi*e1, rma*e2, lf,
@@ -154,6 +167,12 @@
         p3d.add_fields(["ones"])
         yield assert_equal, p3d["ones"], np.ones((nb,nb,nb))
 
+extrema_s = {'particle_position_x': (0, 1)}
+logs_s = {'particle_position_x': False}
+
+extrema_t = {('all', 'particle_position_x'): (0, 1)}
+logs_t = {('all', 'particle_position_x'): False}
+
 def test_particle_profiles():
     for nproc in [1, 2, 4, 8]:
         ds = fake_random_ds(32, nprocs=nproc, particles = 32**3)
@@ -164,6 +183,18 @@
         p1d.add_fields(["particle_ones"])
         yield assert_equal, p1d["particle_ones"].sum(), 32**3
 
+        p1d = create_profile(dd, ["particle_position_x"], ["particle_ones"],
+                             weight_field=None, n_bins=128, extrema=extrema_s,
+                             logs=logs_s)
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+        p1d = create_profile(dd,
+                             [("all", "particle_position_x")],
+                             [("all", "particle_ones")],
+                             weight_field=None, n_bins=128, extrema=extrema_t,
+                             logs=logs_t)
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
         p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
                             "particle_position_y", 128, 0.0, 1.0, False,
                         weight_field = None)

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/fields/api.py
--- a/yt/fields/api.py
+++ b/yt/fields/api.py
@@ -26,6 +26,11 @@
 from . import particle_fields
 #from . import species_fields
 from . import vector_operations
+from . import local_fields
+from . import my_plugin_fields
+
+from .local_fields import add_field, derived_field
+
 
 from .derived_field import \
     DerivedField, \
@@ -38,6 +43,3 @@
     FieldDetector
 from .field_info_container import \
     FieldInfoContainer
-
-from . import local_fields
-from .local_fields import add_field, derived_field

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -102,6 +102,15 @@
                        function=_xray_emissivity,
                        units="") # add correct units here
 
+    def _mazzotta_weighting(field, data):
+        # Spectroscopic-like weighting field for galaxy clusters
+        # Only useful as a weight_field for temperature, metallicity, velocity
+        return data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
+
+    registry.add_field((ftype,"mazzotta_weighting"),
+                       function=_mazzotta_weighting,
+                       units="keV**-0.25*cm**-6")
+    
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")

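One possible use of the new weighting field added above is as the weight for a spectroscopic-like projected temperature; the dataset and axis below are illustrative.

    import yt

    ds = yt.load("galaxy_cluster_dataset")   # placeholder dataset
    prj = yt.ProjectionPlot(ds, "z", ("gas", "temperature"),
                            weight_field=("gas", "mazzotta_weighting"))
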
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -55,7 +55,7 @@
     def _plasma_beta(field,data):
         """This assumes that your front end has provided Bx, By, Bz in
         units of Gauss. If you use MKS, make sure to write your own
-        PlasmaBeta field to deal with non-unitary \mu_0.
+        plasma_beta field to deal with non-unitary \mu_0.
         """
         return data[ftype,'pressure']/data[ftype,'magnetic_energy']
     registry.add_field((ftype, "plasma_beta"),
@@ -69,6 +69,10 @@
              units="erg / cm**3")
 
     def _magnetic_field_strength(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        magnetic_field_strength field to deal with non-unitary \mu_0.
+        """
         return np.sqrt(8.*np.pi*data[ftype,"magnetic_energy"])
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
@@ -110,3 +114,17 @@
              units="gauss",
              validators=[ValidateParameter("normal")])
 
+    def _alfven_speed(field,data):
+        """This assumes that your front end has provided Bx, By, Bz in
+        units of Gauss. If you use MKS, make sure to write your own
+        alfven_speed field to deal with non-unitary \mu_0.
+        """
+        return data[ftype,'magnetic_field_strength']/np.sqrt(4.*np.pi*data[ftype,'density'])
+    registry.add_field((ftype, "alfven_speed"), function=_alfven_speed,
+                       units="cm/s")
+
+    def _mach_alfven(field,data):
+        return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']
+    registry.add_field((ftype, "mach_alfven"), function=_mach_alfven,
+                       units="dimensionless")
+

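A short usage sketch of the two fields added above: alfven_speed is B/sqrt(4*pi*rho) in Gaussian units and mach_alfven is |v|/v_A. The data object is illustrative and assumes a loaded dataset with magnetic fields.

    ad = ds.all_data()               # ds: any loaded dataset providing Bx, By, Bz
    v_a = ad["gas", "alfven_speed"]  # B / sqrt(4*pi*rho), in cm/s
    m_a = ad["gas", "mach_alfven"]   # |v| / v_A, dimensionless
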
diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/fields/my_plugin_fields.py
--- /dev/null
+++ b/yt/fields/my_plugin_fields.py
@@ -0,0 +1,31 @@
+"""
+This is a container for storing fields defined in the my_plugins.py file.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from .field_plugin_registry import \
+    register_field_plugin
+
+from .field_info_container import \
+    FieldInfoContainer
+
+# Empty FieldInfoContainer
+my_plugins_fields = FieldInfoContainer(None, [], None)
+
+ at register_field_plugin
+def setup_my_plugins_fields(registry, ftype="gas", slice_info=None):
+    # fields end up inside this container when added via add_field in
+    # my_plugins.py. See yt.funcs.enable_plugins to see how this is set up.
+    registry.update(my_plugins_fields)

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -85,9 +85,12 @@
     _index_class = SkeletonHierarchy
     _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton'):
+    def __init__(self, filename, dataset_type='skeleton',
+                 storage_filename=None,
+                 units_override=None):
         self.fluid_types += ('skeleton',)
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,3 +39,5 @@
         for frontend in _frontends:
             _mod = "yt.frontends.%s.api" % frontend
             setattr(self, frontend, importlib.import_module(_mod))
+        setattr(self, 'api', importlib.import_module('yt.frontends.api'))
+        setattr(self, '__name__', 'yt.frontends.api')

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ b/yt/frontends/art/api.py
@@ -24,3 +24,5 @@
 
 from .io import \
       IOHandlerART
+
+from . import tests

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -166,7 +166,8 @@
                  skip_particles=False, skip_stars=False,
                  limit_level=None, spread_age=True,
                  force_max_level=None, file_particle_header=None,
-                 file_particle_data=None, file_particle_stars=None):
+                 file_particle_data=None, file_particle_stars=None,
+                 units_override=None):
         self.fluid_types += ("art", )
         if fields is None:
             fields = fluid_fields
@@ -186,7 +187,8 @@
         self.spread_age = spread_age
         self.domain_left_edge = np.zeros(3, dtype='float')
         self.domain_right_edge = np.zeros(3, dtype='float')+1.0
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _find_files(self, file_amr):

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -16,7 +16,8 @@
 
 from yt.testing import \
     requires_file, \
-    assert_equal
+    assert_equal, \
+    units_override_check
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     big_patch_amr, \
@@ -48,3 +49,9 @@
 @requires_file(d9p)
 def test_ARTDataset():
     assert isinstance(data_dir_load(d9p), ARTDataset)
+
+ at requires_file(d9p)
+def test_units_override():
+    for test in units_override_check(d9p):
+        yield test
+

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/artio/api.py
--- a/yt/frontends/artio/api.py
+++ b/yt/frontends/artio/api.py
@@ -22,3 +22,5 @@
 
 from .io import \
     IOHandlerARTIO
+
+from . import tests

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -314,7 +314,8 @@
     _field_info_class = ARTIOFieldInfo
 
     def __init__(self, filename, dataset_type='artio',
-                 storage_filename=None, max_range = 1024):
+                 storage_filename=None, max_range = 1024,
+                 units_override=None):
         if self._handle is not None:
             return
         self.max_range = max_range
@@ -324,7 +325,8 @@
         self._handle = artio_fileset(self._fileset_prefix)
         self.artio_parameters = self._handle.parameters
         # Here we want to initiate a traceback, if the reader is not built.
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override=units_override)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -50,3 +50,8 @@
 @requires_file(sizmbhloz)
 def test_ARTIODataset():
     assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)
+
+ at requires_file(sizmbhloz)
+def test_units_override():
+    for test in units_override_check(sizmbhloz):
+        yield test

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/athena/api.py
--- a/yt/frontends/athena/api.py
+++ b/yt/frontends/athena/api.py
@@ -22,3 +22,5 @@
 
 from .io import \
       IOHandlerAthena
+
+from . import tests

diff -r ea7e7a2406fd0676485649f63dd97745d5d532ff -r 1bb41ad944be569fb8339a6d3aa7f17f6fac9bc4 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -285,7 +285,8 @@
 
         # Need to reset the units in the dataset based on the correct
         # domain left/right/dimensions.
-        self.dataset._set_code_unit_attributes()
+        # DEV: Is this really necessary?
+        #self.dataset._set_code_unit_attributes()
 
         if self.dataset.dimensionality <= 2 :
             self.dataset.domain_dimensions[2] = np.int(1)
@@ -352,12 +353,24 @@
     _dataset_type = "athena"
 
     def __init__(self, filename, dataset_type='athena',
-                 storage_filename=None, parameters=None):
+                 storage_filename=None, parameters=None,
+                 units_override=None):
         self.fluid_types += ("athena",)
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters
-        Dataset.__init__(self, filename, dataset_type)
+        if units_override is None:
+            units_override = {}
+        # This is for backwards-compatibility
+        already_warned = False
+        for k,v in self.specified_parameters.items():
+            if k.endswith("_unit") and k not in units_override:
+                if not already_warned:
+                    mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+
+                                  "and will be removed in a future release. Use units_override instead.")
+                    already_warned = True
+                units_override[k] = self.specified_parameters.pop(k)
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]
@@ -372,23 +385,21 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            val = self.specified_parameters.get("%s_unit" % unit, None)
-            if val is None:
-                if unit == "length": self.no_cgs_equiv_length = True
-                mylog.warning("No %s conversion to cgs provided.  " +
-                              "Assuming 1.0 = 1.0 %s", unit, cgs)
-                val = 1.0
-            if not isinstance(val, tuple):
-                val = (val, cgs)
-            setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
-        self.velocity_unit = self.length_unit/self.time_unit
-        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                  (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
+            # We set these to cgs for now, but they may be overridden later.
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):

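A sketch of the deprecation path handled above: unit conversions passed through the old ``parameters`` dict are folded into ``units_override`` with a warning, so the two calls below should behave the same. The filename and values are placeholders.

    import yt

    # deprecated form: a warning is issued and the value is migrated
    ds_old = yt.load("kh.0010.vtk", parameters={"length_unit": (1.0, "kpc")})

    # preferred form
    ds_new = yt.load("kh.0010.vtk", units_override={"length_unit": (1.0, "kpc")})
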
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/ce76946af029/
Changeset:   ce76946af029
Branch:      yt
User:        jzuhone
Date:        2014-11-11 02:15:11+00:00
Summary:     bugfix
Affected #:  1 file

diff -r 981847305d8fd9ca464e9d57824f418f330e7050 -r ce76946af029a319f144074b06c23822c8400a97 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -37,7 +37,7 @@
         self.emin = YTQuantity(emin, "keV")
         self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)
+        self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -231,7 +231,7 @@
         vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
-        ebins = self.ebins.ndarray_view()
+        ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value


https://bitbucket.org/yt_analysis/yt/commits/724ea69c8531/
Changeset:   724ea69c8531
Branch:      yt
User:        jzuhone
Date:        2014-11-11 02:00:38+00:00
Summary:     Doc fixes
Affected #:  2 files

diff -r 981847305d8fd9ca464e9d57824f418f330e7050 -r 724ea69c853148c4f260f81a28b934864c536da9 doc/source/analyzing/analysis_modules/_images/dsquared.png
Binary file doc/source/analyzing/analysis_modules/_images/dsquared.png has changed

diff -r 981847305d8fd9ca464e9d57824f418f330e7050 -r 724ea69c853148c4f260f81a28b934864c536da9 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -67,7 +67,7 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    ds.add_field("density_squared", function=_density_squared)
+    ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")
 
 Then we'll project this field along the z-axis.
 


https://bitbucket.org/yt_analysis/yt/commits/4ccb6cd4cffa/
Changeset:   4ccb6cd4cffa
Branch:      yt
User:        jzuhone
Date:        2014-11-11 02:16:29+00:00
Summary:     Merge
Affected #:  1 file

diff -r 724ea69c853148c4f260f81a28b934864c536da9 -r 4ccb6cd4cffa380a41fa054cb5090afaabbe8b97 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -37,7 +37,7 @@
         self.emin = YTQuantity(emin, "keV")
         self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)
+        self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -231,7 +231,7 @@
         vec = np.zeros(self.nchan)
         E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
-        ebins = self.ebins.ndarray_view()
+        ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
             sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value


https://bitbucket.org/yt_analysis/yt/commits/b4f6e9688ad1/
Changeset:   b4f6e9688ad1
Branch:      yt
User:        jzuhone
Date:        2014-11-11 02:18:55+00:00
Summary:     Merge
Affected #:  3 files



https://bitbucket.org/yt_analysis/yt/commits/0b81022d6b56/
Changeset:   0b81022d6b56
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:00:15+00:00
Summary:     Better memory management for photon_simulator.
Affected #:  3 files

diff -r b4f6e9688ad17500a1bbc0ddc89312d86c43241c -r 0b81022d6b56262e3872ea802670bba73e0e6e50 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,7 +27,7 @@
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_objects
-from  yt.units.yt_array import uconcatenate
+from yt.units.yt_array import uconcatenate
 
 n_kT = 10000
 kT_min = 8.08e-2
@@ -59,11 +59,15 @@
     Zmet : float or string, optional
         The metallicity. If a float, assumes a constant metallicity throughout.
         If a string, is taken to be the name of the metallicity field.
+    photons_per_chunk : integer
+        The maximum number of photons that are allocated per chunk. Increase or decrease
+        as needed.
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
+        self.photons_per_chunk = photons_per_chunk
 
     def __call__(self, data_source, parameters):
         
@@ -134,60 +138,64 @@
 
             cell_em = EM[idxs]*vol_scale
 
-            u = np.random.random(cell_em.shape)
+            number_of_photons = np.zeros(num_cells)
+            energies = np.zeros(self.photons_per_chunk)
 
-            number_of_photons = np.zeros(num_cells)
-            energies = []
+            start_e = 0
+            end_e = 0
 
             pbar = get_pbar("Generating photons for chunk ", num_cells)
 
             for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
                 kT = kT_bins[ikT] + 0.5*dkT
-        
-                em_sum_c = cell_em[ibegin:iend].sum()
+
+                n_current = iend-ibegin
+
+                cem = cell_em[ibegin:iend]
+
+                em_sum_c = cem.sum()
                 if isinstance(self.Zmet, basestring):
-                    em_sum_m = (metalZ*cell_em)[ibegin:iend].sum()
+                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
                 else:
                     em_sum_m = metalZ*em_sum_c
 
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
 
                 cumspec_c = np.cumsum(cspec.d)
-                counts_c = cumspec_c[:]/cumspec_c[-1]
-                counts_c = np.insert(counts_c, 0, 0.0)
                 tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
+                cumspec_c /= cumspec_c[-1]
+                cumspec_c = np.insert(cumspec_c, 0, 0.0)
 
                 cumspec_m = np.cumsum(mspec.d)
-                counts_m = cumspec_m[:]/cumspec_m[-1]
-                counts_m = np.insert(counts_m, 0, 0.0)
                 tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
+                cumspec_m /= cumspec_m[-1]
+                cumspec_m = np.insert(cumspec_m, 0, 0.0)
 
-                v = u[ibegin:iend]
+                u = np.random.random(size=n_current)
 
-                cell_norm_c = tot_ph_c*cell_em[ibegin:iend]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= v)
+                cell_norm_c = tot_ph_c*cem/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
             
                 if isinstance(self.Zmet, basestring):
-                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cell_em[ibegin:iend]/em_sum_m
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
                 else:
-                    cell_norm_m = tot_ph_m*metalZ*cell_em[ibegin:iend]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= v)
+                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
             
-                cell_n = cell_n_c + cell_n_m
+                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
 
-                number_of_photons[ibegin:iend] = cell_n
+                end_e += int((cell_n_c+cell_n_m).sum())
 
-                for cn, cn_c, cn_m in zip(cell_n, cell_n_c, cell_n_m):
-                    if cn > 0:
-                        randvec_c = np.random.uniform(size=cn_c)
-                        randvec_c.sort()
-                        randvec_m = np.random.uniform(size=cn_m)
-                        randvec_m.sort()
-                        cell_e_c = np.interp(randvec_c, counts_c, energy)
-                        cell_e_m = np.interp(randvec_m, counts_m, energy)
-                        energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                if end_e > self.photons_per_chunk:
+                    raise RuntimeError("Number of photons generated for this chunk "+
+                                       "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
+                                       "Increase photons_per_chunk!")
+
+                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
             
+                start_e = end_e
+
                 pbar.update(iend)
 
             pbar.finish()
@@ -196,7 +204,7 @@
             idxs = idxs[active_cells]
 
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
-            photons["Energy"].append(ds.arr(np.concatenate(energies), "keV"))
+            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
             photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
             photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
@@ -209,3 +217,18 @@
             photons[key] = uconcatenate(photons[key])
 
         return photons
+
+def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
+    energies = np.array([])
+    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
+        if cn_c > 0:
+            randvec_c = np.random.uniform(size=cn_c)
+            randvec_c.sort()
+            cell_e_c = np.interp(randvec_c, counts_c, energy)
+            energies = np.append(energies, cell_e_c)
+        if cn_m > 0: 
+            randvec_m = np.random.uniform(size=cn_m)
+            randvec_m.sort()
+            cell_e_m = np.interp(randvec_m, counts_m, energy)
+            energies = np.append(energies, cell_e_m)
+    return energies

diff -r b4f6e9688ad17500a1bbc0ddc89312d86c43241c -r 0b81022d6b56262e3872ea802670bba73e0e6e50 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -317,6 +317,7 @@
         """
         Write the photons to the HDF5 file *photonfile*.
         """
+
         if parallel_capable:
             
             mpi_long = get_mpi_type("int64")

diff -r b4f6e9688ad17500a1bbc0ddc89312d86c43241c -r 0b81022d6b56262e3872ea802670bba73e0e6e50 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -29,7 +29,9 @@
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
-hc = (hcgs*clight).in_units("keV*angstrom")
+hc = (hcgs*clight).in_units("keV*angstrom").v
+cl = clight.v
+K = 1.0/np.sqrt(2.*np.pi)
 
 class SpectralModel(object):
 
@@ -79,25 +81,25 @@
         xspec.AllModels.setEnergies("%f %f %d lin" %
                                     (self.emin.value, self.emax.value, self.nchan))
         self.model = xspec.Model(self.model_name)
+        self.thermal_comp = getattr(self.model,self.model_name)
         if self.model_name == "bremss":
             self.norm = 3.02e-15
         else:
             self.norm = 1.0e-14
-        
+        self.thermal_comp.norm = 1.0
+        self.thermal_comp.Redshift = 0.0
+
     def get_spectrum(self, kT):
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        m = getattr(self.model,self.model_name)
-        m.kT = kT
-        m.Abundanc = 0.0
-        m.norm = 1.0
-        m.Redshift = 0.0
+        self.thermal_comp.kT = kT
+        self.thermal_comp.Abundanc = 0.0
         cosmic_spec = self.norm*np.array(self.model.values(0))
-        m.Abundanc = 1.0
         if self.model_name == "bremss":
-            metal_spec = np.zeros((self.nchan))
+            metal_spec = np.zeros(self.nchan)
         else:
+            self.thermal_comp.Abundanc = 1.0
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
@@ -187,7 +189,7 @@
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.wvbins = (hc/self.ebins[::-1]).ndarray_view()
+        self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
         # Non-trace metals
@@ -229,12 +231,12 @@
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
-            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
+            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/cl
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
                 vec += np.diff(cdf)*a
@@ -254,13 +256,13 @@
         e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
-        tmpspec += np.interp(self.emid.ndarray_view(), e_cont, continuum)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
         
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
         e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
         
-        tmpspec += np.interp(self.emid.ndarray_view(), e_pseudo, pseudo)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
         
         return tmpspec
 

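The core of this change is replacing per-cell list appends with a single preallocated energy buffer per chunk. A minimal numpy-only sketch of that pattern (illustrative counts and values, not the actual photon sampling):

    import numpy as np

    photons_per_chunk = 1000                      # assumed small value for the sketch
    energies = np.zeros(photons_per_chunk)        # one float64 buffer per chunk
    start_e = end_e = 0

    for cell_n in np.random.randint(0, 5, size=100):   # stand-in for per-cell photon counts
        end_e += int(cell_n)
        if end_e > photons_per_chunk:
            raise RuntimeError("Number of photons exceeds photons_per_chunk; increase it!")
        energies[start_e:end_e] = np.random.uniform(size=cell_n)  # stand-in for sampled energies
        start_e = end_e

    chunk_energies = energies[:end_e].copy()      # only the filled portion is kept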

https://bitbucket.org/yt_analysis/yt/commits/479df28c8949/
Changeset:   479df28c8949
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:24:09+00:00
Summary:     Updating docstring
Affected #:  1 file

diff -r b4f6e9688ad17500a1bbc0ddc89312d86c43241c -r 479df28c894918d03a06a33fbeedff847a2a0de0 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -192,15 +192,15 @@
         determine them, the *photons* dict needs to have the following items, corresponding
         to cells which have photons:
 
-        "x" : the x-position of the cell relative to the source center in kpc, NumPy array of floats
-        "y" : the y-position of the cell relative to the source center in kpc, NumPy array of floats
-        "z" : the z-position of the cell relative to the source center in kpc, NumPy array of floats
-        "vx" : the x-velocity of the cell in km/s, NumPy array of floats
-        "vy" : the y-velocity of the cell in km/s, NumPy array of floats
-        "vz" : the z-velocity of the cell in km/s, NumPy array of floats
-        "dx" : the width of the cell in kpc, NumPy array of floats
+        "x" : the x-position of the cell relative to the source center in kpc, YTArray
+        "y" : the y-position of the cell relative to the source center in kpc, YTArray
+        "z" : the z-position of the cell relative to the source center in kpc, YTArray
+        "vx" : the x-velocity of the cell in km/s, YTArray
+        "vy" : the y-velocity of the cell in km/s, YTArray
+        "vz" : the z-velocity of the cell in km/s, YTArray
+        "dx" : the width of the cell in kpc, YTArray
         "NumberOfPhotons" : the number of photons in the cell, NumPy array of integers
-        "Energy" : the source rest-frame energies of the photons, NumPy array of floats
+        "Energy" : the source rest-frame energies of the photons, YTArray
 
         The last array is not the same size as the others because it contains the energies in all of
         the cells in a single 1-D array. The first photons["NumberOfPhotons"][0] elements are
@@ -211,6 +211,9 @@
         create photons based on the fields in the dataset could be created. 
 
         >>> from scipy.stats import powerlaw
+        >>> import numpy as np
+        >>> import yt
+        >>> from yt.analysis_modules.photon_simulator import *
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -218,18 +221,20 @@
         ...     num_photons = parameters["num_photons"]
         ...     E0  = parameters["line_energy"] # Energies are in keV
         ...     sigE = parameters["line_sigma"] 
+        ...     src_ctr = parameters["center"]
         ...
         ...     energies = norm.rvs(loc=E0, scale=sigE, size=num_photons)
-        ...     
-        ...     photons["x"] = np.zeros((1)) # Place everything in the center cell
-        ...     photons["y"] = np.zeros((1))
-        ...     photons["z"] = np.zeros((1))
-        ...     photons["vx"] = np.zeros((1))
-        ...     photons["vy"] = np.zeros((1))
-        ...     photons["vz"] = 100.*np.ones((1))
-        ...     photons["dx"] = source["dx"][0]*ds.units["kpc"]*np.ones((1)) 
-        ...     photons["NumberOfPhotons"] = num_photons*np.ones((1))
-        ...     photons["Energy"] = np.array(energies)
+        ...
+        ...     # Place everything in the center cell
+        ...     for i, ax in enumerate("xyz"):
+        ...         photons[ax] = (ds.domain_center[0]-src_ctr[0]).in_units("kpc")
+        ...     photons["vx"] = ds.arr([0], "km/s")
+        ...     photons["vy"] = ds.arr([0], "km/s")
+        ...     photons["vz"] = ds.arr([100.0], "km/s")
+        ...     ds.find_field_value_at_point
+        ...     photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc")
+        ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones((1))
+        ...     photons["Energy"] = ds.arr(energies, "keV")
         >>>
         >>> redshift = 0.05
         >>> area = 6000.0
@@ -237,11 +242,12 @@
         >>> parameters = {"num_photons" : 10000, "line_energy" : 5.0,
         ...               "line_sigma" : 0.1}
         >>> ddims = (128,128,128)
-        >>> random_data = {"Density":np.random.random(ddims)}
-        >>> ds = load_uniform_grid(random_data, ddims)
+        >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
+        >>> ds = yt.load_uniform_grid(random_data, ddims)
         >>> dd = ds.all_data
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
-        ...                                         time, line_func)
+        ...                                         time, line_func,
+        ...                                         parameters=parameters)
 
         """
 
@@ -626,7 +632,7 @@
         
         num_events = len(events["xpix"])
             
-        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
+        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)


https://bitbucket.org/yt_analysis/yt/commits/0de0665d6543/
Changeset:   0de0665d6543
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:24:21+00:00
Summary:     Merge
Affected #:  3 files

diff -r 479df28c894918d03a06a33fbeedff847a2a0de0 -r 0de0665d6543c9015cf1a854268fd2a168054b7a yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,7 +27,7 @@
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_objects
-from  yt.units.yt_array import uconcatenate
+from yt.units.yt_array import uconcatenate
 
 n_kT = 10000
 kT_min = 8.08e-2
@@ -59,11 +59,15 @@
     Zmet : float or string, optional
         The metallicity. If a float, assumes a constant metallicity throughout.
         If a string, is taken to be the name of the metallicity field.
+    photons_per_chunk : integer
+        The maximum number of photons that are allocated per chunk. Increase or decrease
+        as needed.
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
+        self.photons_per_chunk = photons_per_chunk
 
     def __call__(self, data_source, parameters):
         
@@ -134,60 +138,64 @@
 
             cell_em = EM[idxs]*vol_scale
 
-            u = np.random.random(cell_em.shape)
+            number_of_photons = np.zeros(num_cells)
+            energies = np.zeros(self.photons_per_chunk)
 
-            number_of_photons = np.zeros(num_cells)
-            energies = []
+            start_e = 0
+            end_e = 0
 
             pbar = get_pbar("Generating photons for chunk ", num_cells)
 
             for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
                 kT = kT_bins[ikT] + 0.5*dkT
-        
-                em_sum_c = cell_em[ibegin:iend].sum()
+
+                n_current = iend-ibegin
+
+                cem = cell_em[ibegin:iend]
+
+                em_sum_c = cem.sum()
                 if isinstance(self.Zmet, basestring):
-                    em_sum_m = (metalZ*cell_em)[ibegin:iend].sum()
+                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
                 else:
                     em_sum_m = metalZ*em_sum_c
 
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
 
                 cumspec_c = np.cumsum(cspec.d)
-                counts_c = cumspec_c[:]/cumspec_c[-1]
-                counts_c = np.insert(counts_c, 0, 0.0)
                 tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
+                cumspec_c /= cumspec_c[-1]
+                cumspec_c = np.insert(cumspec_c, 0, 0.0)
 
                 cumspec_m = np.cumsum(mspec.d)
-                counts_m = cumspec_m[:]/cumspec_m[-1]
-                counts_m = np.insert(counts_m, 0, 0.0)
                 tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
+                cumspec_m /= cumspec_m[-1]
+                cumspec_m = np.insert(cumspec_m, 0, 0.0)
 
-                v = u[ibegin:iend]
+                u = np.random.random(size=n_current)
 
-                cell_norm_c = tot_ph_c*cell_em[ibegin:iend]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= v)
+                cell_norm_c = tot_ph_c*cem/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
             
                 if isinstance(self.Zmet, basestring):
-                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cell_em[ibegin:iend]/em_sum_m
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
                 else:
-                    cell_norm_m = tot_ph_m*metalZ*cell_em[ibegin:iend]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= v)
+                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
             
-                cell_n = cell_n_c + cell_n_m
+                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
 
-                number_of_photons[ibegin:iend] = cell_n
+                end_e += int((cell_n_c+cell_n_m).sum())
 
-                for cn, cn_c, cn_m in zip(cell_n, cell_n_c, cell_n_m):
-                    if cn > 0:
-                        randvec_c = np.random.uniform(size=cn_c)
-                        randvec_c.sort()
-                        randvec_m = np.random.uniform(size=cn_m)
-                        randvec_m.sort()
-                        cell_e_c = np.interp(randvec_c, counts_c, energy)
-                        cell_e_m = np.interp(randvec_m, counts_m, energy)
-                        energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                if end_e > self.photons_per_chunk:
+                    raise RuntimeError("Number of photons generated for this chunk "+
+                                       "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
+                                       "Increase photons_per_chunk!")
+
+                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
             
+                start_e = end_e
+
                 pbar.update(iend)
 
             pbar.finish()
@@ -196,7 +204,7 @@
             idxs = idxs[active_cells]
 
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
-            photons["Energy"].append(ds.arr(np.concatenate(energies), "keV"))
+            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
             photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
             photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
@@ -209,3 +217,18 @@
             photons[key] = uconcatenate(photons[key])
 
         return photons
+
+def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
+    energies = np.array([])
+    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
+        if cn_c > 0:
+            randvec_c = np.random.uniform(size=cn_c)
+            randvec_c.sort()
+            cell_e_c = np.interp(randvec_c, counts_c, energy)
+            energies = np.append(energies, cell_e_c)
+        if cn_m > 0: 
+            randvec_m = np.random.uniform(size=cn_m)
+            randvec_m.sort()
+            cell_e_m = np.interp(randvec_m, counts_m, energy)
+            energies = np.append(energies, cell_e_m)
+    return energies

diff -r 479df28c894918d03a06a33fbeedff847a2a0de0 -r 0de0665d6543c9015cf1a854268fd2a168054b7a yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -323,6 +323,7 @@
         """
         Write the photons to the HDF5 file *photonfile*.
         """
+
         if parallel_capable:
             
             mpi_long = get_mpi_type("int64")

diff -r 479df28c894918d03a06a33fbeedff847a2a0de0 -r 0de0665d6543c9015cf1a854268fd2a168054b7a yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -29,7 +29,9 @@
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
-hc = (hcgs*clight).in_units("keV*angstrom")
+hc = (hcgs*clight).in_units("keV*angstrom").v
+cl = clight.v
+K = 1.0/np.sqrt(2.*np.pi)
 
 class SpectralModel(object):
 
@@ -79,25 +81,25 @@
         xspec.AllModels.setEnergies("%f %f %d lin" %
                                     (self.emin.value, self.emax.value, self.nchan))
         self.model = xspec.Model(self.model_name)
+        self.thermal_comp = getattr(self.model,self.model_name)
         if self.model_name == "bremss":
             self.norm = 3.02e-15
         else:
             self.norm = 1.0e-14
-        
+        self.thermal_comp.norm = 1.0
+        self.thermal_comp.Redshift = 0.0
+
     def get_spectrum(self, kT):
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        m = getattr(self.model,self.model_name)
-        m.kT = kT
-        m.Abundanc = 0.0
-        m.norm = 1.0
-        m.Redshift = 0.0
+        self.thermal_comp.kT = kT
+        self.thermal_comp.Abundanc = 0.0
         cosmic_spec = self.norm*np.array(self.model.values(0))
-        m.Abundanc = 1.0
         if self.model_name == "bremss":
-            metal_spec = np.zeros((self.nchan))
+            metal_spec = np.zeros(self.nchan)
         else:
+            self.thermal_comp.Abundanc = 1.0
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
@@ -187,7 +189,7 @@
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.wvbins = (hc/self.ebins[::-1]).ndarray_view()
+        self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
         # Non-trace metals
@@ -229,12 +231,12 @@
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
-            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
+            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/cl
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
                 vec += np.diff(cdf)*a
@@ -254,13 +256,13 @@
         e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
-        tmpspec += np.interp(self.emid.ndarray_view(), e_cont, continuum)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
         
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
         e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
         
-        tmpspec += np.interp(self.emid.ndarray_view(), e_pseudo, pseudo)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
         
         return tmpspec
 


https://bitbucket.org/yt_analysis/yt/commits/ec11b8791b69/
Changeset:   ec11b8791b69
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:25:35+00:00
Summary:     Doc fix
Affected #:  1 file

diff -r 0de0665d6543c9015cf1a854268fd2a168054b7a -r ec11b8791b69e8a68e380ffedfb3342ec604f4bf yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -210,7 +210,6 @@
         spectrum of photons is created. More complicated examples which actually
         create photons based on the fields in the dataset could be created. 
 
-        >>> from scipy.stats import powerlaw
         >>> import numpy as np
         >>> import yt
         >>> from yt.analysis_modules.photon_simulator import *


https://bitbucket.org/yt_analysis/yt/commits/cbd15441fb1d/
Changeset:   cbd15441fb1d
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:34:41+00:00
Summary:     Doc info on photons_per_chunk
Affected #:  1 file

diff -r ec11b8791b69e8a68e380ffedfb3342ec604f4bf -r cbd15441fb1d65089dddc8cfbd9991f2a66af4a6 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -141,7 +141,8 @@
 
 .. code:: python
 
-    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
+                                       photons_per_chunk=100000000)
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -150,6 +151,14 @@
 assume that is the name of the metallicity field (which may be spatially
 varying).
 
+The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source
+to generate the photons, to reduce memory usage and make parallelization more
+efficient. For each chunk, memory is set aside for the photon energies that will
+be generated. ``photons_per_chunk`` is an optional keyword argument which controls
+the size of this array. For large numbers of photons, you may find that
+this parameter needs to be set higher, or if you are looking to decrease memory
+usage, you might set this parameter lower.
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more

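As a rough guide when tuning this parameter, the buffer holds one float64 per potential photon, so the per-chunk footprint is simply 8 bytes times photons_per_chunk (a sketch assuming the float64 buffer used in the current implementation):

    photons_per_chunk = 100000000            # the value used in the doc example above
    print(photons_per_chunk * 8.0 / 1024**3) # ~0.75 GiB of preallocated energies per chunk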

https://bitbucket.org/yt_analysis/yt/commits/c752dd47fab9/
Changeset:   c752dd47fab9
Branch:      yt
User:        jzuhone
Date:        2014-11-11 23:46:44+00:00
Summary:     Adding thermal broadening to XSPEC thermal models.
Affected #:  1 file

diff -r cbd15441fb1d65089dddc8cfbd9991f2a66af4a6 -r c752dd47fab9c3b04da7ddc7786871d2cbaa998c yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -69,8 +69,9 @@
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan):
+    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
         self.model_name = model_name
+        self.thermal_broad = thermal_broad
         SpectralModel.__init__(self, emin, emax, nchan)
         
     def prepare(self):
@@ -88,6 +89,8 @@
             self.norm = 1.0e-14
         self.thermal_comp.norm = 1.0
         self.thermal_comp.Redshift = 0.0
+        if self.thermal_broad:
+            xspec.Xset.addModelString("APECTHERMAL","yes")
 
     def get_spectrum(self, kT):
         """

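A hedged usage sketch of the new keyword, following the existing docstring example (the APECTHERMAL switch applies to the apec model; the import path assumes the module's api exports the class as elsewhere in these docs):

    from yt.analysis_modules.photon_simulator.api import XSpecThermalModel

    # emin=0.05 keV, emax=50.0 keV, 1000 channels, thermal line broadening enabled
    apec_model = XSpecThermalModel("apec", 0.05, 50.0, 1000, thermal_broad=True)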

https://bitbucket.org/yt_analysis/yt/commits/a6f70f755a68/
Changeset:   a6f70f755a68
Branch:      yt
User:        jzuhone
Date:        2014-11-12 02:05:51+00:00
Summary:     No shifting
Affected #:  1 file

diff -r c752dd47fab9c3b04da7ddc7786871d2cbaa998c -r a6f70f755a683058274e30417bab75c5ad13aba5 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -430,7 +430,7 @@
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False):
+                        convolve_energies=False, no_shifting=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -461,6 +461,8 @@
             The names of the ARF and/or RMF files to convolve the photons with.
         convolve_energies : boolean, optional
             If this is set, the photon energies will be convolved with the RMF.
+        no_shifting : boolean, optional
+            If set, the photon energies will not be Doppler shifted.
             
         Examples
         --------
@@ -567,12 +569,13 @@
         x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-                    
-        vz = self.photons["vx"]*z_hat[0] + \
-             self.photons["vy"]*z_hat[1] + \
-             self.photons["vz"]*z_hat[2]
-        shift = -vz.in_cgs()/clight
-        shift = np.sqrt((1.-shift)/(1.+shift))
+
+        if not no_shifting:
+            vz = self.photons["vx"]*z_hat[0] + \
+                 self.photons["vy"]*z_hat[1] + \
+                 self.photons["vz"]*z_hat[2]
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -586,8 +589,11 @@
         z *= delta
         x += self.photons["x"][obs_cells]
         y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]  
-        eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        z += self.photons["z"][obs_cells]
+        if no_shifting:
+            eobs = self.photons["Energy"][idxs]
+        else:
+            eobs = self.photons["Energy"][idxs]*shift[obs_cells]
 
         xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
         ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]

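A hedged usage sketch (assuming an existing PhotonList called photons and a line-of-sight vector as the first argument, as in the photon_simulator docs); setting no_shifting skips the Doppler shift entirely:

    L = [0.0, 0.0, 1.0]   # line-of-sight direction
    events = photons.project_photons(L, no_shifting=True)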

https://bitbucket.org/yt_analysis/yt/commits/688494555cce/
Changeset:   688494555cce
Branch:      yt
User:        jzuhone
Date:        2014-11-12 17:11:51+00:00
Summary:     Documentation updates
Affected #:  2 files

diff -r a6f70f755a683058274e30417bab75c5ad13aba5 -r 688494555cced3ac6952daac4d5ad8376681a97d doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -46,17 +46,23 @@
 .. code:: python
 
     import yt
+    #yt.enable_parallelism() # If you want to run in parallel this should go here!
     from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
+.. note::
+
+    For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
+    the import of the ``photon_simulator`` module, as shown above.
+
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
     ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
-                 parameters={"time_unit":(1.0,"Myr"),
-                            "length_unit":(1.0,"Mpc"),
-                            "mass_unit":(1.0e14,"Msun")}) 
+                 units_override={"time_unit":(1.0,"Myr"),
+                                 "length_unit":(1.0,"Mpc"),
+                                 "mass_unit":(1.0e14,"Msun")})
 
 First, to get a sense of what the resulting image will look like, let's
 make a new yt field called ``"density_squared"``, since the X-ray
@@ -443,11 +449,11 @@
 .. code:: python
 
    data = {}
-   data["density"] = dens
-   data["temperature"] = temp
-   data["velocity_x"] = np.zeros(ddims)
-   data["velocity_y"] = np.zeros(ddims)
-   data["velocity_z"] = np.zeros(ddims)
+   data["density"] = (dens, "g/cm**3")
+   data["temperature"] = (temp, "K")
+   data["velocity_x"] = (np.zeros(ddims), "cm/s")
+   data["velocity_y"] = (np.zeros(ddims), "cm/s")
+   data["velocity_z"] = (np.zeros(ddims), "cm/s")
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
@@ -460,7 +466,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere("c", (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r a6f70f755a683058274e30417bab75c5ad13aba5 -r 688494555cced3ac6952daac4d5ad8376681a97d yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1165,7 +1165,7 @@
 
         if energy_bins:
             spectype = "energy"
-            espec = self.events["eobs"].ndarray_view()
+            espec = self.events["eobs"].d
             range = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=range)
             bins = 0.5*(ee[1:]+ee[:-1])


https://bitbucket.org/yt_analysis/yt/commits/cf5721b973b4/
Changeset:   cf5721b973b4
Branch:      yt
User:        jzuhone
Date:        2014-11-12 18:04:27+00:00
Summary:     Doc fixes
Affected #:  2 files

diff -r 688494555cced3ac6952daac4d5ad8376681a97d -r cf5721b973b43c7b1081f9f09682b56eb4b1af3a doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -389,8 +389,9 @@
 
     events3 = EventList.join_events(events1, events2)
 
-**WARNING**: This doesn't check for parameter consistency between the
-two lists!
+.. warning:: This doesn't check for parameter consistency between the
+   two lists! It will simply assume that they are the same and use the
+   `parameters` dictionary from the first `EventList`.
 
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++

diff -r 688494555cced3ac6952daac4d5ad8376681a97d -r cf5721b973b43c7b1081f9f09682b56eb4b1af3a yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -230,7 +230,6 @@
         ...     photons["vx"] = ds.arr([0], "km/s")
         ...     photons["vy"] = ds.arr([0], "km/s")
         ...     photons["vz"] = ds.arr([100.0], "km/s")
-        ...     ds.find_field_value_at_point
         ...     photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc")
         ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones((1))
         ...     photons["Energy"] = ds.arr(energies, "keV")


https://bitbucket.org/yt_analysis/yt/commits/f4c0d50b4d54/
Changeset:   f4c0d50b4d54
Branch:      yt
User:        jzuhone
Date:        2014-11-13 19:34:56+00:00
Summary:     Helpful info messages
Affected #:  1 file

diff -r cf5721b973b43c7b1081f9f09682b56eb4b1af3a -r f4c0d50b4d541ce62c6272326135f317921e5ca0 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -203,6 +203,9 @@
             active_cells = number_of_photons > 0
             idxs = idxs[active_cells]
 
+            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
+            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
+
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
             photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))


https://bitbucket.org/yt_analysis/yt/commits/cbdb8025d56a/
Changeset:   cbdb8025d56a
Branch:      yt
User:        jzuhone
Date:        2014-11-16 18:58:54+00:00
Summary:     Fixing missed parenthesis.
Affected #:  1 file

diff -r f4c0d50b4d541ce62c6272326135f317921e5ca0 -r cbdb8025d56a03ffd37cf894b2b9c3706f4ab47d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -132,7 +132,7 @@
 in code units.
 
 Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
-the `join_vtk` script, and may benefit from being subdivided into "virtual grids". For this purpose,
+the `join_vtk` script), and may benefit from being subdivided into "virtual grids". For this purpose,
 one can pass in the `nprocs` parameter:
 
 .. code-block:: python


https://bitbucket.org/yt_analysis/yt/commits/6696c68fb3ba/
Changeset:   6696c68fb3ba
Branch:      yt
User:        jzuhone
Date:        2014-11-18 03:48:54+00:00
Summary:     Implement __add__ for joining two event lists together and filter events using ds9 regions
Affected #:  3 files

diff -r a6f70f755a683058274e30417bab75c5ad13aba5 -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -303,11 +303,11 @@
             187.49897546,  187.47307048]) degree, 
      'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
             12.33327653,  12.32955225]) degree, 
-     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]) (dimensionless), 
+     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633]) (dimensionless)}
+     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633])}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -377,11 +377,11 @@
    files in subtle ways that we haven't been able to identify. Please email
    jzuhone at gmail.com if you find any bugs!
 
-Two ``EventList`` instances can be joined togther like this:
+Two ``EventList`` instances can be added together:
 
 .. code:: python
 
-    events3 = EventList.join_events(events1, events2)
+    events3 = events1+events2
 
 **WARNING**: This doesn't check for parameter consistency between the
 two lists!

diff -r a6f70f755a683058274e30417bab75c5ad13aba5 -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -138,7 +138,7 @@
 
             cell_em = EM[idxs]*vol_scale
 
-            number_of_photons = np.zeros(num_cells)
+            number_of_photons = np.zeros(num_cells, dtype="uint64")
             energies = np.zeros(self.photons_per_chunk)
 
             start_e = 0

diff -r a6f70f755a683058274e30417bab75c5ad13aba5 -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -199,7 +199,7 @@
         "vy" : the y-velocity of the cell in km/s, YTArray
         "vz" : the z-velocity of the cell in km/s, YTArray
         "dx" : the width of the cell in kpc, YTArray
-        "NumberOfPhotons" : the number of photons in the cell, NumPy array of integers
+        "NumberOfPhotons" : the number of photons in the cell, NumPy array of unsigned 64-bit integers
         "Energy" : the source rest-frame energies of the photons, YTArray
 
         The last array is not the same size as the others because it contains the energies in all of
@@ -230,9 +230,8 @@
         ...     photons["vx"] = ds.arr([0], "km/s")
         ...     photons["vy"] = ds.arr([0], "km/s")
         ...     photons["vz"] = ds.arr([100.0], "km/s")
-        ...     ds.find_field_value_at_point
         ...     photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc")
-        ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones((1))
+        ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones(1), dtype="uint64")
         ...     photons["Energy"] = ds.arr(energies, "keV")
         >>>
         >>> redshift = 0.05
@@ -782,6 +781,33 @@
 
     def __repr__(self):
         return self.events.__repr__()
+
+    def __add__(self, other):
+        events = {}
+        for item1, item2 in zip(self.items(), other.items()):
+            k1, v1 = item1
+            k2, v2 = item2
+            events[k1] = uconcatenate([v1,v2])
+        return EventList(events, self.parameters)
+
+    def filter_events(self, region):
+        """                                                                                                                                 
+        Filter events using a ds9 region. Requires the pyregion package.                                                                    
+        Returns a new EventList.                                                                                                            
+        """
+        import pyregion
+        import os
+        if os.path.exists(region):
+            reg = pyregion.open(region)
+        else:
+            reg = pyregion.parse(region)
+        r = reg.as_imagecoord(header=self.wcs.to_header())
+        f = r.get_filter()
+        idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+        new_events = {}
+        for k, v in self.events.items():
+            new_events[k] = v[idxs]
+        return EventList(new_events, self.parameters)
    
     @classmethod
     def from_h5_file(cls, h5file):

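Taken together, the new pieces can be used roughly as follows (a sketch assuming two EventList objects built with identical parameters and a ds9 region file on disk; filter_events requires the pyregion package):

    # Combine two event lists generated from different data sources
    events3 = events1 + events2

    # Keep only the events falling inside a ds9 region (file path or inline region string)
    core_events = events3.filter_events("circle.reg")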

https://bitbucket.org/yt_analysis/yt/commits/0c3127bcf242/
Changeset:   0c3127bcf242
Branch:      yt
User:        jzuhone
Date:        2014-11-18 03:49:28+00:00
Summary:     Merging
Affected #:  4 files

diff -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 -r 0c3127bcf24277286b6786f55bed18cf1332075b doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -46,17 +46,23 @@
 .. code:: python
 
     import yt
+    #yt.enable_parallelism() # If you want to run in parallel this should go here!
     from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
+.. note::
+
+    For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
+    the import of the ``photon_simulator`` module, as shown above.
+
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
     ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
-                 parameters={"time_unit":(1.0,"Myr"),
-                            "length_unit":(1.0,"Mpc"),
-                            "mass_unit":(1.0e14,"Msun")}) 
+                 units_override={"time_unit":(1.0,"Myr"),
+                                 "length_unit":(1.0,"Mpc"),
+                                 "mass_unit":(1.0e14,"Msun")})
 
 First, to get a sense of what the resulting image will look like, let's
 make a new yt field called ``"density_squared"``, since the X-ray
@@ -383,8 +389,9 @@
 
     events3 = events1+events2
 
-**WARNING**: This doesn't check for parameter consistency between the
-two lists!
+.. warning:: This doesn't check for parameter consistency between the
+   two lists! It will simply assume that they are the same and use the
+   `parameters` dictionary from the first `EventList`.
 
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -443,11 +450,11 @@
 .. code:: python
 
    data = {}
-   data["density"] = dens
-   data["temperature"] = temp
-   data["velocity_x"] = np.zeros(ddims)
-   data["velocity_y"] = np.zeros(ddims)
-   data["velocity_z"] = np.zeros(ddims)
+   data["density"] = (dens, "g/cm**3")
+   data["temperature"] = (temp, "K")
+   data["velocity_x"] = (np.zeros(ddims), "cm/s")
+   data["velocity_y"] = (np.zeros(ddims), "cm/s")
+   data["velocity_z"] = (np.zeros(ddims), "cm/s")
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
@@ -460,7 +467,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere("c", (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 -r 0c3127bcf24277286b6786f55bed18cf1332075b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -132,7 +132,7 @@
 in code units.
 
 Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
-the `join_vtk` script, and may benefit from being subdivided into "virtual grids". For this purpose,
+the `join_vtk` script), and may benefit from being subdivided into "virtual grids". For this purpose,
 one can pass in the `nprocs` parameter:
 
 .. code-block:: python

diff -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 -r 0c3127bcf24277286b6786f55bed18cf1332075b yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -203,6 +203,9 @@
             active_cells = number_of_photons > 0
             idxs = idxs[active_cells]
 
+            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
+            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
+
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
             photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))

diff -r 6696c68fb3badaf606fade3a8d3e5dfadd8ce4c1 -r 0c3127bcf24277286b6786f55bed18cf1332075b yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1191,7 +1191,7 @@
 
         if energy_bins:
             spectype = "energy"
-            espec = self.events["eobs"].ndarray_view()
+            espec = self.events["eobs"].d
             range = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=range)
             bins = 0.5*(ee[1:]+ee[:-1])


https://bitbucket.org/yt_analysis/yt/commits/94cf5b29476b/
Changeset:   94cf5b29476b
Branch:      yt
User:        jzuhone
Date:        2014-11-18 03:54:27+00:00
Summary:     Missed this
Affected #:  1 file

diff -r 0c3127bcf24277286b6786f55bed18cf1332075b -r 94cf5b29476b9eb8c99ffbb478a1993db62e015a yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -28,7 +28,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
-from yt.units.yt_array import YTQuantity, YTArray
+from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy
 pyfits = _astropy.pyfits


https://bitbucket.org/yt_analysis/yt/commits/f886c24e3062/
Changeset:   f886c24e3062
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:09:28+00:00
Summary:     Check two EventLists for parameter consistency when adding them.
Affected #:  2 files

diff -r 94cf5b29476b9eb8c99ffbb478a1993db62e015a -r f886c24e30621d4d7404085f47615643c81fd6fb doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -383,15 +383,15 @@
    files in subtle ways that we haven't been able to identify. Please email
    jzuhone at gmail.com if you find any bugs!
 
-Two ``EventList`` instances can be added together:
+Two ``EventList`` instances can be added together, which is useful if they were
+created using different data sources:
 
 .. code:: python
 
     events3 = events1+events2
 
-.. warning:: This doesn't check for parameter consistency between the
-   two lists! It will simply assume that they are the same and use the
-   `parameters` dictionary from the first `EventList`.
+.. warning:: This only works if the two event lists were generated using
+    the same parameters!
 
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++

diff -r 94cf5b29476b9eb8c99ffbb478a1993db62e015a -r f886c24e30621d4d7404085f47615643c81fd6fb yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -80,7 +80,10 @@
                     for i in xrange(self.num_cells)]
         else:
             return self.photons[key]
-    
+
+    def __repr__(self):
+        return self.photons.__repr__()
+
     @classmethod
     def from_file(cls, filename):
         r"""
@@ -783,6 +786,18 @@
         return self.events.__repr__()
 
     def __add__(self, other):
+        keys1 = self.parameters.keys()
+        keys2 = other.parameters.keys()
+        keys1.sort()
+        keys2.sort()
+        if keys1 != keys2:
+            raise RuntimeError("The two EventLists do not have the same parameters!")
+        for k1, k2 in zip(keys1, keys2):
+            v1 = self.parameters[k1]
+            v2 = other.parameters[k2]
+            if v1 != v2:
+                raise RuntimeError("The values for the parameter %s in the two EventLists" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
         events = {}
         for item1, item2 in zip(self.items(), other.items()):
             k1, v1 = item1


https://bitbucket.org/yt_analysis/yt/commits/f1c2b5bb5330/
Changeset:   f1c2b5bb5330
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:19:32+00:00
Summary:     Making this as general as possible
Affected #:  1 file

diff -r f886c24e30621d4d7404085f47615643c81fd6fb -r f1c2b5bb53308f425096cedcaa69464042bb3acc yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -795,7 +795,7 @@
         for k1, k2 in zip(keys1, keys2):
             v1 = self.parameters[k1]
             v2 = other.parameters[k2]
-            if v1 != v2:
+            if np.all(v1 != v2):
                 raise RuntimeError("The values for the parameter %s in the two EventLists" % k1 +
                                    " are not identical (%s vs. %s)!" % (v1, v2))
         events = {}
@@ -907,19 +907,6 @@
         
         return cls(events, parameters)
 
-    @classmethod
-    def join_events(cls, events1, events2):
-        """
-        Join two sets of events, *events1* and *events2*.
-        """
-        events = {}
-        for item1, item2 in zip(events1.items(), events2.items()):
-            k1, v1 = item1
-            k2, v2 = item2
-            events[k1] = np.concatenate([v1,v2])
-        
-        return cls(events, events1.parameters)
-                
     @parallel_root_only
     def write_fits_file(self, fitsfile, clobber=False):
         """


https://bitbucket.org/yt_analysis/yt/commits/ab20a1503b2b/
Changeset:   ab20a1503b2b
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:21:04+00:00
Summary:     Minor nit
Affected #:  1 file

diff -r f1c2b5bb53308f425096cedcaa69464042bb3acc -r ab20a1503b2b615a73ee467af8b9f525e53dcebc yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -796,7 +796,7 @@
             v1 = self.parameters[k1]
             v2 = other.parameters[k2]
             if np.all(v1 != v2):
-                raise RuntimeError("The values for the parameter %s in the two EventLists" % k1 +
+                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
                                    " are not identical (%s vs. %s)!" % (v1, v2))
         events = {}
         for item1, item2 in zip(self.items(), other.items()):


https://bitbucket.org/yt_analysis/yt/commits/5212a80995e4/
Changeset:   5212a80995e4
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:24:13+00:00
Summary:     Another minor nit
Affected #:  1 file

diff -r ab20a1503b2b615a73ee467af8b9f525e53dcebc -r 5212a80995e47ab9ab9b1c0cdaff81bc7db82da4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -752,7 +752,7 @@
     
 class EventList(object) :
 
-    def __init__(self, events, parameters) :
+    def __init__(self, events, parameters):
 
         self.events = events
         self.parameters = parameters


https://bitbucket.org/yt_analysis/yt/commits/839b7b07b42a/
Changeset:   839b7b07b42a
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:34:44+00:00
Summary:     making this a bit more lenient on float comparisons (but not much)
Affected #:  1 file

diff -r 5212a80995e47ab9ab9b1c0cdaff81bc7db82da4 -r 839b7b07b42a21264a13a7e4e2f453921aca076d yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -795,7 +795,11 @@
         for k1, k2 in zip(keys1, keys2):
             v1 = self.parameters[k1]
             v2 = other.parameters[k2]
-            if np.all(v1 != v2):
+            if isinstance(v1, basestring) or isinstance(v2, basestring):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
                 raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
                                    " are not identical (%s vs. %s)!" % (v1, v2))
         events = {}

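For illustration (not from the commit): the comparison rule introduced above, written as a
standalone helper. It uses ``str`` where the Python 2 code above uses ``basestring``; the
tolerance mirrors the diff and the sample values are made up.

    import numpy as np

    def parameters_match(v1, v2):
        # Strings must match exactly; numbers (scalars or arrays) are compared
        # with a tight absolute tolerance, as in EventList.__add__ above.
        if isinstance(v1, str) or isinstance(v2, str):
            return v1 == v2
        return np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)

    print(parameters_match("apec", "apec"))            # True
    print(parameters_match(6000.0, 6000.0 + 1.0e-12))  # True, within atol
    print(parameters_match(6000.0, 6001.0))            # False
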

https://bitbucket.org/yt_analysis/yt/commits/316a99bb2473/
Changeset:   316a99bb2473
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:42:24+00:00
Summary:     Documenting the ds9 region filtering.
Affected #:  1 file

diff -r 839b7b07b42a21264a13a7e4e2f453921aca076d -r 316a99bb2473e2cf50a3cd5e5f6a40ac2e4dd90c doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -393,6 +393,14 @@
 .. warning:: This only works if the two event lists were generated using
     the same parameters!
 
+Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``,
+defined by a ds9 region (this functionality requires the
+`pyregion <http://pyregion.readthedocs.org>`_ package to be installed):
+
+.. code:: python
+
+    circle_events = events.filter_events("circle.reg")
+
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 

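For illustration (not part of the changeset): a hypothetical region and the two ways
``filter_events`` can receive it, assuming ``events`` is an ``EventList`` built as in the
docs above. The file name, coordinates, and radius are placeholders; per the implementation,
an existing file is read with ``pyregion.open`` and anything else is handed to
``pyregion.parse``.

    # Hypothetical sketch: `events` is an existing EventList.
    region = 'fk5;circle(187.49,12.33,120")'   # a ds9 circle: RA, Dec (deg), radius

    with open("circle.reg", "w") as f:         # write it out as a region file...
        f.write(region + "\n")

    circle_events = events.filter_events("circle.reg")   # ...and filter from the file,
    same_events = events.filter_events(region)           # or directly from the string.
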

https://bitbucket.org/yt_analysis/yt/commits/b96cc6686511/
Changeset:   b96cc6686511
Branch:      yt
User:        jzuhone
Date:        2014-11-18 04:47:54+00:00
Summary:     Catch a situation when the region contains no events.
Affected #:  1 file

diff -r 316a99bb2473e2cf50a3cd5e5f6a40ac2e4dd90c -r b96cc6686511c62f299b8838160bd629ea723fce yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -823,6 +823,8 @@
         r = reg.as_imagecoord(header=self.wcs.to_header())
         f = r.get_filter()
         idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+        if idxs.sum() == 0:
+            raise RuntimeError("No events are inside this region!")
         new_events = {}
         for k, v in self.events.items():
             new_events[k] = v[idxs]


https://bitbucket.org/yt_analysis/yt/commits/97ff7e53d570/
Changeset:   97ff7e53d570
Branch:      yt
User:        jzuhone
Date:        2014-11-18 16:35:16+00:00
Summary:     Only apply velocity shifting and broadening if no_shifting is False (the default).
Affected #:  1 file

diff -r b96cc6686511c62f299b8838160bd629ea723fce -r 97ff7e53d570c0a9bfd9286c06fce836168ad397 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -26,8 +26,11 @@
 import ppv_utils
 from yt.funcs import is_root
 
-def create_vlos(normal):
-    if isinstance(normal, basestring):
+def create_vlos(normal, no_shifting):
+    if no_shifting:
+        def _v_los(field, data):
+            return data.ds.arr(data["zeros"], "cm/s")
+    elif isinstance(normal, basestring):
         def _v_los(field, data): 
             return -data["velocity_%s" % normal]
     else:
@@ -48,7 +51,7 @@
 class PPVCube(object):
     def __init__(self, ds, normal, field, center="c", width=(1.0,"unitary"),
                  dims=(100,100,100), velocity_bounds=None, thermal_broad=False,
-                 atomic_weight=56., method="integrate"):
+                 atomic_weight=56., method="integrate", no_shifting=False):
         r""" Initialize a PPVCube object.
 
         Parameters
@@ -88,6 +91,9 @@
             Set the projection method to be used.
             "integrate" : line of sight integration over the line element.
             "sum" : straight summation over the line of sight.
+        no_shifting : boolean, optional
+            If set, no velocity shifting is applied; only thermal broadening occurs.
+            Should not be set when *thermal_broad* is False, since then nothing happens!
 
         Examples
         --------
@@ -102,6 +108,10 @@
         self.width = width
         self.particle_mass = atomic_weight*mh
         self.thermal_broad = thermal_broad
+        self.no_shifting = no_shifting
+
+        if no_shifting and not thermal_broad:
+            raise RuntimeError("no_shifting cannot be True when thermal_broad is False!")
 
         self.center = ds.coordinates.sanitize_center(center, normal)[0]
 
@@ -135,7 +145,7 @@
 
         self.current_v = 0.0
 
-        _vlos = create_vlos(normal)
+        _vlos = create_vlos(normal, self.no_shifting)
         self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")
 
         _intensity = self.create_intensity()

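For illustration (not part of the changeset): a hedged usage sketch of the new keyword. The
dataset name and field are placeholders, and the import path assumes the usual ``api`` module
for this analysis module; as the guard in the diff shows, ``no_shifting=True`` is only
allowed together with ``thermal_broad=True``.

    import yt
    from yt.analysis_modules.ppv_cube.api import PPVCube

    ds = yt.load("my_data.vtk")                 # placeholder dataset
    cube = PPVCube(ds, "z", "density", dims=(100, 100, 50),
                   thermal_broad=True, no_shifting=True)
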

https://bitbucket.org/yt_analysis/yt/commits/d17b7b5036dc/
Changeset:   d17b7b5036dc
Branch:      yt
User:        jzuhone
Date:        2014-11-18 19:42:17+00:00
Summary:     Bugfix
Affected #:  1 file

diff -r 97ff7e53d570c0a9bfd9286c06fce836168ad397 -r d17b7b5036dc57e7c98fd26f4c23eb124dc7ac6a yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -297,7 +297,7 @@
         w.wcs.cunit = [units,units,vunit]
         w.wcs.ctype = [types[0],types[1],vtype]
 
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib = FITSImageBuffer(self.data.transpose(2,0,1), fields=self.field, wcs=w)
         fib[0].header["bunit"] = re.sub('()', '', str(self.proj_units))
         fib[0].header["btype"] = self.field
 

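For illustration (not from the commit): why the explicit axis order matters here.
``transpose()`` with no arguments reverses all three axes, which also swaps the two sky axes;
``transpose(2, 0, 1)`` only moves the velocity axis to the front. Shapes below are arbitrary.

    import numpy as np

    data = np.zeros((100, 50, 25))        # (nx, ny, nv)
    print(data.transpose().shape)         # (25, 50, 100): sky axes swapped too
    print(data.transpose(2, 0, 1).shape)  # (25, 100, 50): sky axes kept in order
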

https://bitbucket.org/yt_analysis/yt/commits/863eeb36693d/
Changeset:   863eeb36693d
Branch:      yt
User:        ngoldbaum
Date:        2014-11-21 02:00:36+00:00
Summary:     Merged in jzuhone/yt (pull request #1304)

Improving memory management for photon_simulator, and virtual grids for Athena datasets.
Affected #:  14 files

diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f doc/source/analyzing/analysis_modules/_images/dsquared.png
Binary file doc/source/analyzing/analysis_modules/_images/dsquared.png has changed

diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -46,17 +46,23 @@
 .. code:: python
 
     import yt
+    #yt.enable_parallelism() # If you want to run in parallel this should go here!
     from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
+.. note::
+
+    For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
+    the import of the ``photon_simulator`` module, as shown above.
+
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
     ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
-                 parameters={"time_unit":(1.0,"Myr"),
-                            "length_unit":(1.0,"Mpc"),
-                            "mass_unit":(1.0e14,"Msun")}) 
+                 units_override={"time_unit":(1.0,"Myr"),
+                                 "length_unit":(1.0,"Mpc"),
+                                 "mass_unit":(1.0e14,"Msun")})
 
 First, to get a sense of what the resulting image will look like, let's
 make a new yt field called ``"density_squared"``, since the X-ray
@@ -67,7 +73,7 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("density_squared", function=_density_squared)
+    ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")
 
 Then we'll project this field along the z-axis.
 
@@ -141,7 +147,8 @@
 
 .. code:: python
 
-    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
+                                       photons_per_chunk=100000000)
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -150,6 +157,14 @@
 assume that is the name of the metallicity field (which may be spatially
 varying).
 
+The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source
+to generate the photons, to reduce memory usage and make parallelization more
+efficient. For each chunk, memory is set aside for the photon energies that will
+be generated. ``photons_per_chunk`` is an optional keyword argument which controls
+the size of this array. For large numbers of photons, you may find that
+this parameter needs to be set higher, or if you are looking to decrease memory
+usage, you might set this parameter lower.
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -294,11 +309,11 @@
             187.49897546,  187.47307048]) degree, 
      'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
             12.33327653,  12.32955225]) degree, 
-     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]) (dimensionless), 
+     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633]) (dimensionless)}
+     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633])}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -368,14 +383,23 @@
    files in subtle ways that we haven't been able to identify. Please email
    jzuhone at gmail.com if you find any bugs!
 
-Two ``EventList`` instances can be joined togther like this:
+Two ``EventList`` instances can be added together, which is useful if they were
+created using different data sources:
 
 .. code:: python
 
-    events3 = EventList.join_events(events1, events2)
+    events3 = events1+events2
 
-**WARNING**: This doesn't check for parameter consistency between the
-two lists!
+.. warning:: This only works if the two event lists were generated using
+    the same parameters!
+
+Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``,
+defined by a ds9 region (this functionality requires the
+`pyregion <http://pyregion.readthedocs.org>`_ package to be installed):
+
+.. code:: python
+
+    circle_events = events.filter_events("circle.reg")
 
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -434,11 +458,11 @@
 .. code:: python
 
    data = {}
-   data["density"] = dens
-   data["temperature"] = temp
-   data["velocity_x"] = np.zeros(ddims)
-   data["velocity_y"] = np.zeros(ddims)
-   data["velocity_z"] = np.zeros(ddims)
+   data["density"] = (dens, "g/cm**3")
+   data["temperature"] = (temp, "K")
+   data["velocity_x"] = (np.zeros(ddims), "cm/s")
+   data["velocity_y"] = (np.zeros(ddims), "cm/s")
+   data["velocity_z"] = (np.zeros(ddims), "cm/s")
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
@@ -451,7 +475,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere("c", (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

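For illustration (not part of the changeset): a minimal, self-contained version of the
updated in-memory workflow documented above, with placeholder values. Fields are now supplied
as (array, unit) tuples, and the sphere uses the "c" shorthand for the domain center.

    import numpy as np
    import yt

    ddims = (64, 64, 64)
    data = {"density":     (np.full(ddims, 1.0e-26), "g/cm**3"),
            "temperature": (np.full(ddims, 1.0e7),   "K"),
            "velocity_x":  (np.zeros(ddims), "cm/s"),
            "velocity_y":  (np.zeros(ddims), "cm/s"),
            "velocity_z":  (np.zeros(ddims), "cm/s")}
    bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])

    ds = yt.load_uniform_grid(data, ddims, length_unit=(1.0, "Mpc"), bbox=bbox)
    sphere = ds.sphere("c", (0.25, "Mpc"))    # placeholder radius
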
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -131,6 +131,22 @@
 ``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
 in code units.
 
+Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
+the ``join_vtk`` script), and may benefit from being subdivided into "virtual grids". For this purpose,
+one can pass in the ``nprocs`` parameter:
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("sloshing.0000.vtk", nprocs=8)
+
+which will subdivide each original grid into ``nprocs`` grids.
+
+.. note::
+
+    Virtual grids are only supported (and really only necessary) for 3D data.
+
 Alternative values for the following simulation parameters may be specified using a ``parameters``
 dict, accepting the following keys:
 

diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -26,12 +26,12 @@
 from yt.funcs import *
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system
-from yt import units
+     communication_system, parallel_objects
+from yt.units.yt_array import uconcatenate
 
-N_TBIN = 10000
-TMIN = 8.08e-2
-TMAX = 50.
+n_kT = 10000
+kT_min = 8.08e-2
+kT_max = 50.
 
 comm = communication_system.communicators[-1]
 
@@ -59,11 +59,15 @@
     Zmet : float or string, optional
         The metallicity. If a float, assumes a constant metallicity throughout.
         If a string, is taken to be the name of the metallicity field.
+    photons_per_chunk : integer
+        The maximum number of photons that are allocated per chunk. Increase or decrease
+        as needed.
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
+        self.photons_per_chunk = photons_per_chunk
 
     def __call__(self, data_source, parameters):
         
@@ -74,130 +78,160 @@
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
         dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
-                
+        src_ctr = parameters["center"]
+
         vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
 
-        num_cells = data_source["temperature"].shape[0]
-        start_c = comm.rank*num_cells/comm.size
-        end_c = (comm.rank+1)*num_cells/comm.size
-        
-        kT = (kboltz*data_source["temperature"][start_c:end_c]).in_units("keV").to_ndarray()
-        vol = data_source["cell_volume"][start_c:end_c].in_cgs().to_ndarray()
-        EM = (data_source["density"][start_c:end_c]/mp).to_ndarray()**2
-        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+        my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        data_source.clear_data()
-    
-        x = data_source["x"][start_c:end_c].copy()
-        y = data_source["y"][start_c:end_c].copy()
-        z = data_source["z"][start_c:end_c].copy()
-        dx = data_source["dx"][start_c:end_c].copy()
-
-        data_source.clear_data()
-        
-        vx = data_source["velocity_x"][start_c:end_c].copy()
-        vy = data_source["velocity_y"][start_c:end_c].copy()
-        vz = data_source["velocity_z"][start_c:end_c].copy()
-    
-        if isinstance(self.Zmet, basestring):
-            metalZ = data_source[self.Zmet][start_c:end_c].to_ndarray()
-        else:
-            metalZ = self.Zmet*np.ones(EM.shape)
-        
-        data_source.clear_data()
-
-        idxs = np.argsort(kT)
-        dshape = idxs.shape
-
-        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
-        dkT = kT_bins[1]-kT_bins[0]
-        kT_idxs = np.digitize(kT[idxs], kT_bins)
-        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
-        bcounts = np.bincount(kT_idxs).astype("int")
-        bcounts = bcounts[bcounts > 0]
-        n = int(0)
-        bcell = []
-        ecell = []
-        for bcount in bcounts:
-            bcell.append(n)
-            ecell.append(n+bcount)
-            n += bcount
-        kT_idxs = np.unique(kT_idxs)
-        
         self.spectral_model.prepare()
         energy = self.spectral_model.ebins
-    
-        cell_em = EM[idxs]*vol_scale
-    
-        number_of_photons = np.zeros(dshape, dtype='uint64')
-        energies = []
-    
-        u = np.random.random(cell_em.shape)
-        
-        pbar = get_pbar("Generating Photons", dshape[0])
 
-        for i, ikT in enumerate(kT_idxs):
+        citer = data_source.chunks(["kT","cell_volume","density",
+                                    "x","y","z","dx","velocity_x",
+                                    "velocity_y","velocity_z"], "io")
 
-            ibegin = bcell[i]
-            iend = ecell[i]
-            kT = kT_bins[ikT] + 0.5*dkT
-        
-            em_sum_c = cell_em[ibegin:iend].sum()
-            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+        photons = {}
+        photons["x"] = []
+        photons["y"] = []
+        photons["z"] = []
+        photons["vx"] = []
+        photons["vy"] = []
+        photons["vz"] = []
+        photons["dx"] = []
+        photons["Energy"] = []
+        photons["NumberOfPhotons"] = []
 
-            cspec, mspec = self.spectral_model.get_spectrum(kT)
-            cspec *= dist_fac*em_sum_c/vol_scale
-            mspec *= dist_fac*em_sum_m/vol_scale
+        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
 
-            cumspec_c = np.cumsum(cspec.ndarray_view())
-            counts_c = cumspec_c[:]/cumspec_c[-1]
-            counts_c = np.insert(counts_c, 0, 0.0)
-            tot_ph_c = cumspec_c[-1]*area.value*exp_time.value
+        for chunk in parallel_objects(citer):
 
-            cumspec_m = np.cumsum(mspec.ndarray_view())
-            counts_m = cumspec_m[:]/cumspec_m[-1]
-            counts_m = np.insert(counts_m, 0, 0.0)
-            tot_ph_m = cumspec_m[-1]*area.value*exp_time.value
+            kT = chunk["kT"].v
+            num_cells = len(kT)
+            if num_cells == 0:
+                continue
+            vol = chunk["cell_volume"].in_cgs().v
+            EM = (chunk["density"]/mp).v**2
+            EM *= 0.5*(1.+self.X_H)*self.X_H*vol
 
-            for icell in xrange(ibegin, iend):
+            if isinstance(self.Zmet, basestring):
+                metalZ = chunk[self.Zmet].v
+            else:
+                metalZ = self.Zmet
+
+            idxs = np.argsort(kT)
+
+            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            dkT = kT_bins[1]-kT_bins[0]
+            kT_idxs = np.digitize(kT[idxs], kT_bins)
+            kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1
+            bcounts = np.bincount(kT_idxs).astype("int")
+            bcounts = bcounts[bcounts > 0]
+            n = int(0)
+            bcell = []
+            ecell = []
+            for bcount in bcounts:
+                bcell.append(n)
+                ecell.append(n+bcount)
+                n += bcount
+            kT_idxs = np.unique(kT_idxs)
+
+            cell_em = EM[idxs]*vol_scale
+
+            number_of_photons = np.zeros(num_cells, dtype="uint64")
+            energies = np.zeros(self.photons_per_chunk)
+
+            start_e = 0
+            end_e = 0
+
+            pbar = get_pbar("Generating photons for chunk ", num_cells)
+
+            for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
+
+                kT = kT_bins[ikT] + 0.5*dkT
+
+                n_current = iend-ibegin
+
+                cem = cell_em[ibegin:iend]
+
+                em_sum_c = cem.sum()
+                if isinstance(self.Zmet, basestring):
+                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
+                else:
+                    em_sum_m = metalZ*em_sum_c
+
+                cspec, mspec = self.spectral_model.get_spectrum(kT)
+
+                cumspec_c = np.cumsum(cspec.d)
+                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
+                cumspec_c /= cumspec_c[-1]
+                cumspec_c = np.insert(cumspec_c, 0, 0.0)
+
+                cumspec_m = np.cumsum(mspec.d)
+                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
+                cumspec_m /= cumspec_m[-1]
+                cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+                u = np.random.random(size=n_current)
+
+                cell_norm_c = tot_ph_c*cem/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
             
-                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+                if isinstance(self.Zmet, basestring):
+                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
+                else:
+                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
             
-                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+
+                end_e += int((cell_n_c+cell_n_m).sum())
+
+                if end_e > self.photons_per_chunk:
+                    raise RuntimeError("Number of photons generated for this chunk "+
+                                       "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
+                                       "Increase photons_per_chunk!")
+
+                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
             
-                cell_n = cell_n_c + cell_n_m
+                start_e = end_e
 
-                if cell_n > 0:
-                    number_of_photons[icell] = cell_n
-                    randvec_c = np.random.uniform(size=cell_n_c)
-                    randvec_c.sort()
-                    randvec_m = np.random.uniform(size=cell_n_m)
-                    randvec_m.sort()
-                    cell_e_c = np.interp(randvec_c, counts_c, energy)
-                    cell_e_m = np.interp(randvec_m, counts_m, energy)
-                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
-                
-                pbar.update(icell)
+                pbar.update(iend)
 
-        pbar.finish()
-            
-        active_cells = number_of_photons > 0
-        idxs = idxs[active_cells]
-        
-        photons = {}
+            pbar.finish()
 
-        src_ctr = parameters["center"]
-        
-        photons["x"] = (x[idxs]-src_ctr[0]).in_units("kpc")
-        photons["y"] = (y[idxs]-src_ctr[1]).in_units("kpc")
-        photons["z"] = (z[idxs]-src_ctr[2]).in_units("kpc")
-        photons["vx"] = vx[idxs].in_units("km/s")
-        photons["vy"] = vy[idxs].in_units("km/s")
-        photons["vz"] = vz[idxs].in_units("km/s")
-        photons["dx"] = dx[idxs].in_units("kpc")
-        photons["NumberOfPhotons"] = number_of_photons[active_cells]
-        photons["Energy"] = np.concatenate(energies)*units.keV
-    
+            active_cells = number_of_photons > 0
+            idxs = idxs[active_cells]
+
+            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
+            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
+
+            photons["NumberOfPhotons"].append(number_of_photons[active_cells])
+            photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
+            photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
+            photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc"))
+            photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc"))
+            photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s"))
+            photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s"))
+            photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
+            photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
+
+        for key in photons:
+            photons[key] = uconcatenate(photons[key])
+
         return photons
+
+def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
+    energies = np.array([])
+    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
+        if cn_c > 0:
+            randvec_c = np.random.uniform(size=cn_c)
+            randvec_c.sort()
+            cell_e_c = np.interp(randvec_c, counts_c, energy)
+            energies = np.append(energies, cell_e_c)
+        if cn_m > 0: 
+            randvec_m = np.random.uniform(size=cn_m)
+            randvec_m.sort()
+            cell_e_m = np.interp(randvec_m, counts_m, energy)
+            energies = np.append(energies, cell_e_m)
+    return energies

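For illustration (not from the commit): the inverse-CDF sampling that ``_generate_energies``
performs for each cell, reduced to a standalone NumPy sketch with a made-up spectrum.

    import numpy as np

    energy = np.linspace(0.1, 10.0, 1001)    # spectral bin edges in keV
    spec = energy[:-1] ** -1.5                # toy spectrum: counts per bin

    cumspec = np.cumsum(spec)
    cumspec /= cumspec[-1]                    # normalize to a CDF...
    cumspec = np.insert(cumspec, 0, 0.0)      # ...with the same length as `energy`

    u = np.random.uniform(size=10000)         # one deviate per photon
    u.sort()
    photon_energies = np.interp(u, cumspec, energy)
    print(photon_energies.min(), photon_energies.max())
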
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -28,8 +28,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
-from yt import units
-from yt.units.yt_array import YTQuantity
+from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy
 pyfits = _astropy.pyfits
@@ -81,7 +80,10 @@
                     for i in xrange(self.num_cells)]
         else:
             return self.photons[key]
-    
+
+    def __repr__(self):
+        return self.photons.__repr__()
+
     @classmethod
     def from_file(cls, filename):
         r"""
@@ -93,12 +95,12 @@
         
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = f["/fid_exp_time"].value*units.s
-        parameters["FiducialArea"] = f["/fid_area"].value*units.cm*units.cm
+        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
         parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = f["/fid_d_a"].value*units.Mpc
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
         parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = f["/width"].value*units.kpc
+        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
         parameters["HubbleConstant"] = f["/hubble"].value
         parameters["OmegaMatter"] = f["/omega_matter"].value
         parameters["OmegaLambda"] = f["/omega_lambda"].value
@@ -107,13 +109,13 @@
         start_c = comm.rank*num_cells/comm.size
         end_c = (comm.rank+1)*num_cells/comm.size
         
-        photons["x"] = f["/x"][start_c:end_c]*units.kpc
-        photons["y"] = f["/y"][start_c:end_c]*units.kpc
-        photons["z"] = f["/z"][start_c:end_c]*units.kpc
-        photons["dx"] = f["/dx"][start_c:end_c]*units.kpc
-        photons["vx"] = f["/vx"][start_c:end_c]*units.km/units.s
-        photons["vy"] = f["/vy"][start_c:end_c]*units.km/units.s
-        photons["vz"] = f["/vz"][start_c:end_c]*units.km/units.s
+        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
 
         n_ph = f["/num_photons"][:]
         
@@ -128,7 +130,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
         
-        photons["Energy"] = f["/energy"][start_e:end_e]*units.keV
+        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
         
         f.close()
 
@@ -193,15 +195,15 @@
         determine them, the *photons* dict needs to have the following items, corresponding
         to cells which have photons:
 
-        "x" : the x-position of the cell relative to the source center in kpc, NumPy array of floats
-        "y" : the y-position of the cell relative to the source center in kpc, NumPy array of floats
-        "z" : the z-position of the cell relative to the source center in kpc, NumPy array of floats
-        "vx" : the x-velocity of the cell in km/s, NumPy array of floats
-        "vy" : the y-velocity of the cell in km/s, NumPy array of floats
-        "vz" : the z-velocity of the cell in km/s, NumPy array of floats
-        "dx" : the width of the cell in kpc, NumPy array of floats
-        "NumberOfPhotons" : the number of photons in the cell, NumPy array of integers
-        "Energy" : the source rest-frame energies of the photons, NumPy array of floats
+        "x" : the x-position of the cell relative to the source center in kpc, YTArray
+        "y" : the y-position of the cell relative to the source center in kpc, YTArray
+        "z" : the z-position of the cell relative to the source center in kpc, YTArray
+        "vx" : the x-velocity of the cell in km/s, YTArray
+        "vy" : the y-velocity of the cell in km/s, YTArray
+        "vz" : the z-velocity of the cell in km/s, YTArray
+        "dx" : the width of the cell in kpc, YTArray
+        "NumberOfPhotons" : the number of photons in the cell, NumPy array of unsigned 64-bit integers
+        "Energy" : the source rest-frame energies of the photons, YTArray
 
         The last array is not the same size as the others because it contains the energies in all of
         the cells in a single 1-D array. The first photons["NumberOfPhotons"][0] elements are
@@ -211,7 +213,9 @@
         spectrum of photons is created. More complicated examples which actually
         create photons based on the fields in the dataset could be created. 
 
-        >>> from scipy.stats import powerlaw
+        >>> import numpy as np
+        >>> import yt
+        >>> from yt.analysis_modules.photon_simulator import *
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -219,18 +223,19 @@
         ...     num_photons = parameters["num_photons"]
         ...     E0  = parameters["line_energy"] # Energies are in keV
         ...     sigE = parameters["line_sigma"] 
+        ...     src_ctr = parameters["center"]
         ...
         ...     energies = norm.rvs(loc=E0, scale=sigE, size=num_photons)
-        ...     
-        ...     photons["x"] = np.zeros((1)) # Place everything in the center cell
-        ...     photons["y"] = np.zeros((1))
-        ...     photons["z"] = np.zeros((1))
-        ...     photons["vx"] = np.zeros((1))
-        ...     photons["vy"] = np.zeros((1))
-        ...     photons["vz"] = 100.*np.ones((1))
-        ...     photons["dx"] = source["dx"][0]*ds.units["kpc"]*np.ones((1)) 
-        ...     photons["NumberOfPhotons"] = num_photons*np.ones((1))
-        ...     photons["Energy"] = np.array(energies)
+        ...
+        ...     # Place everything in the center cell
+        ...     for i, ax in enumerate("xyz"):
+        ...         photons[ax] = (ds.domain_center[0]-src_ctr[0]).in_units("kpc")
+        ...     photons["vx"] = ds.arr([0], "km/s")
+        ...     photons["vy"] = ds.arr([0], "km/s")
+        ...     photons["vz"] = ds.arr([100.0], "km/s")
+        ...     photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc")
+        ...     photons["NumberOfPhotons"] = np.array(num_photons*np.ones(1), dtype="uint64")
+        ...     photons["Energy"] = ds.arr(energies, "keV")
         >>>
         >>> redshift = 0.05
         >>> area = 6000.0
@@ -238,11 +243,12 @@
         >>> parameters = {"num_photons" : 10000, "line_energy" : 5.0,
         ...               "line_sigma" : 0.1}
         >>> ddims = (128,128,128)
-        >>> random_data = {"Density":np.random.random(ddims)}
-        >>> ds = load_uniform_grid(random_data, ddims)
+        >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
+        >>> ds = yt.load_uniform_grid(random_data, ddims)
         >>> dd = ds.all_data
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
-        ...                                         time, line_func)
+        ...                                         time, line_func,
+        ...                                         parameters=parameters)
 
         """
 
@@ -296,17 +302,19 @@
         dimension = 0
         width = 0.0
         for i, ax in enumerate("xyz"):
-            pos = data_source[ax]
-            delta = data_source["d%s"%(ax)]
-            le = np.min(pos-0.5*delta)
-            re = np.max(pos+0.5*delta)
+            le, re = data_source.quantities.extrema(ax)
+            delta_min, delta_max = data_source.quantities.extrema("d%s"%ax)
+            le -= 0.5*delta_max
+            re += 0.5*delta_max
             width = max(width, re-parameters["center"][i], parameters["center"][i]-le)
-            dimension = max(dimension, int(width/delta.min()))
+            dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
                 
         photons = photon_model(data_source, parameters)
-        
+
+        mylog.info("Finished generating photons.")
+
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
                         
@@ -316,6 +324,7 @@
         """
         Write the photons to the HDF5 file *photonfile*.
         """
+
         if parallel_capable:
             
             mpi_long = get_mpi_type("int64")
@@ -332,15 +341,15 @@
                 num_photons = sum(sizes_p)        
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
-                x = np.zeros((num_cells))
-                y = np.zeros((num_cells))
-                z = np.zeros((num_cells))
-                vx = np.zeros((num_cells))
-                vy = np.zeros((num_cells))
-                vz = np.zeros((num_cells))
-                dx = np.zeros((num_cells))
-                n_ph = np.zeros((num_cells), dtype="uint64")
-                e = np.zeros((num_photons))
+                x = np.zeros(num_cells)
+                y = np.zeros(num_cells)
+                z = np.zeros(num_cells)
+                vx = np.zeros(num_cells)
+                vy = np.zeros(num_cells)
+                vz = np.zeros(num_cells)
+                dx = np.zeros(num_cells)
+                n_ph = np.zeros(num_cells, dtype="uint64")
+                e = np.zeros(num_photons)
             else:
                 sizes_c = []
                 sizes_p = []
@@ -377,15 +386,15 @@
 
         else:
 
-            x = self.photons["x"].ndarray_view()
-            y = self.photons["y"].ndarray_view()
-            z = self.photons["z"].ndarray_view()
-            vx = self.photons["vx"].ndarray_view()
-            vy = self.photons["vy"].ndarray_view()
-            vz = self.photons["vz"].ndarray_view()
-            dx = self.photons["dx"].ndarray_view()
+            x = self.photons["x"].d
+            y = self.photons["y"].d
+            z = self.photons["z"].d
+            vx = self.photons["vx"].d
+            vy = self.photons["vy"].d
+            vz = self.photons["vz"].d
+            dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
-            e = self.photons["Energy"].ndarray_view()
+            e = self.photons["Energy"].d
                                                 
         if comm.rank == 0:
             
@@ -423,7 +432,7 @@
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False):
+                        convolve_energies=False, no_shifting=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -454,6 +463,8 @@
             The names of the ARF and/or RMF files to convolve the photons with.
         convolve_energies : boolean, optional
             If this is set, the photon energies will be convolved with the RMF.
+        no_shifting : boolean, optional
+            If set, the photon energies will not be Doppler shifted.
             
         Examples
         --------
@@ -472,7 +483,7 @@
         else:
             sky_center = YTArray(sky_center, "degree")
 
-        dx = self.photons["dx"].ndarray_view()
+        dx = self.photons["dx"].d
         nx = self.parameters["Dimension"]
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
@@ -560,12 +571,13 @@
         x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
         z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-                    
-        vz = self.photons["vx"]*z_hat[0] + \
-             self.photons["vy"]*z_hat[1] + \
-             self.photons["vz"]*z_hat[2]
-        shift = -vz.in_cgs()/clight
-        shift = np.sqrt((1.-shift)/(1.+shift))
+
+        if not no_shifting:
+            vz = self.photons["vx"]*z_hat[0] + \
+                 self.photons["vy"]*z_hat[1] + \
+                 self.photons["vz"]*z_hat[2]
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -579,8 +591,11 @@
         z *= delta
         x += self.photons["x"][obs_cells]
         y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]  
-        eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        z += self.photons["z"][obs_cells]
+        if no_shifting:
+            eobs = self.photons["Energy"][idxs]
+        else:
+            eobs = self.photons["Energy"][idxs]*shift[obs_cells]
 
         xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
         ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
@@ -611,7 +626,7 @@
         events = {}
 
         dx_min = self.parameters["Width"].value/self.parameters["Dimension"]
-        dtheta = np.rad2deg(dx_min/D_A.value)*units.degree
+        dtheta = YTQuantity(np.rad2deg(dx_min/D_A.value), "degree")
         
         events["xpix"] = xsky[detected]/dx_min + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min + 0.5*(nx+1)
@@ -625,7 +640,7 @@
         
         num_events = len(events["xpix"])
             
-        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
+        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
@@ -737,7 +752,7 @@
     
 class EventList(object) :
 
-    def __init__(self, events, parameters) :
+    def __init__(self, events, parameters):
 
         self.events = events
         self.parameters = parameters
@@ -749,8 +764,8 @@
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2                                                
         x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
-        self.events["xsky"] = x*units.degree
-        self.events["ysky"] = y*units.degree
+        self.events["xsky"] = YTArray(x, "degree")
+        self.events["ysky"] = YTArray(y, "degree")
 
     def keys(self):
         return self.events.keys()
@@ -769,6 +784,51 @@
 
     def __repr__(self):
         return self.events.__repr__()
+
+    def __add__(self, other):
+        keys1 = self.parameters.keys()
+        keys2 = other.parameters.keys()
+        keys1.sort()
+        keys2.sort()
+        if keys1 != keys2:
+            raise RuntimeError("The two EventLists do not have the same parameters!")
+        for k1, k2 in zip(keys1, keys2):
+            v1 = self.parameters[k1]
+            v2 = other.parameters[k2]
+            if isinstance(v1, basestring) or isinstance(v2, basestring):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
+                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
+        events = {}
+        for item1, item2 in zip(self.items(), other.items()):
+            k1, v1 = item1
+            k2, v2 = item2
+            events[k1] = uconcatenate([v1,v2])
+        return EventList(events, self.parameters)
+
+    def filter_events(self, region):
+        """                                                                                                                                 
+        Filter events using a ds9 region. Requires the pyregion package.                                                                    
+        Returns a new EventList.                                                                                                            
+        """
+        import pyregion
+        import os
+        if os.path.exists(region):
+            reg = pyregion.open(region)
+        else:
+            reg = pyregion.parse(region)
+        r = reg.as_imagecoord(header=self.wcs.to_header())
+        f = r.get_filter()
+        idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+        if idxs.sum() == 0:
+            raise RuntimeError("No events are inside this region!")
+        new_events = {}
+        for k, v in self.events.items():
+            new_events[k] = v[idxs]
+        return EventList(new_events, self.parameters)
    
     @classmethod
     def from_h5_file(cls, h5file):
@@ -780,10 +840,10 @@
         
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = f["/exp_time"].value*units.s
-        parameters["Area"] = f["/area"].value*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = f["/d_a"].value*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
             parameters["RMF"] = f["/rmf"].value
         if "arf" in f:
@@ -799,13 +859,13 @@
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
-        events["eobs"] = f["/eobs"][:]*units.keV
+        events["eobs"] = YTArray(f["/eobs"][:], "keV")
         if "pi" in f:
             events["PI"] = f["/pi"][:]
         if "pha" in f:
             events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = f["/sky_center"][:]*units.deg
-        parameters["dtheta"] = f["/dtheta"].value*units.deg
+        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
         parameters["pix_center"] = f["/pix_center"][:]
         
         f.close()
@@ -824,10 +884,10 @@
         events = {}
         parameters = {}
         
-        parameters["ExposureTime"] = tblhdu.header["EXPOSURE"]*units.s
-        parameters["Area"] = tblhdu.header["AREA"]*units.cm*units.cm
+        parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
+        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
-        parameters["AngularDiameterDistance"] = tblhdu.header["D_A"]*units.Mpc
+        parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:
             parameters["RMF"] = tblhdu["RMF"]
         if "ARF" in tblhdu.header:
@@ -840,12 +900,12 @@
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
             parameters["Instrument"] = tblhdu["INSTRUME"]
-        parameters["sky_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])*units.deg
+        parameters["sky_center"] = YTArray([tblhdu["TCRVL2"],tblhdu["TCRVL3"]], "deg")
         parameters["pix_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]])
-        parameters["dtheta"] = tblhdu["TCRVL3"]*units.deg
+        parameters["dtheta"] = YTQuantity(tblhdu["TCRVL3"], "deg")
         events["xpix"] = tblhdu.data.field("X")
         events["ypix"] = tblhdu.data.field("Y")
-        events["eobs"] = (tblhdu.data.field("ENERGY")/1000.)*units.keV # Convert to keV
+        events["eobs"] = YTArray(tblhdu.data.field("ENERGY")/1000., "keV")
         if "PI" in tblhdu.columns.names:
             events["PI"] = tblhdu.data.field("PI")
         if "PHA" in tblhdu.columns.names:
@@ -853,19 +913,6 @@
         
         return cls(events, parameters)
 
-    @classmethod
-    def join_events(cls, events1, events2):
-        """
-        Join two sets of events, *events1* and *events2*.
-        """
-        events = {}
-        for item1, item2 in zip(events1.items(), events2.items()):
-            k1, v1 = item1
-            k2, v2 = item2
-            events[k1] = np.concatenate([v1,v2])
-        
-        return cls(events, events1.parameters)
-                
     @parallel_root_only
     def write_fits_file(self, fitsfile, clobber=False):
         """
@@ -1152,7 +1199,7 @@
 
         if energy_bins:
             spectype = "energy"
-            espec = self.events["eobs"].ndarray_view()
+            espec = self.events["eobs"].d
             range = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=range)
             bins = 0.5*(ee[1:]+ee[:-1])

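For illustration (not from the commit): the Doppler factor that ``project_photons`` applies
to the rest-frame energies when ``no_shifting`` is left at False, written out for a few
line-of-sight velocities. Values are placeholders; with ``no_shifting=True`` the energies
are used unmodified.

    import numpy as np

    clight = 2.99792458e10                  # speed of light in cm/s
    vz = np.array([-1.0e8, 0.0, 1.0e8])     # velocity along the line of sight, cm/s

    shift = -vz / clight
    shift = np.sqrt((1.0 - shift) / (1.0 + shift))
    print(6.7 * shift)    # a 6.7 keV photon: shifted down, unchanged, shifted up
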
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -14,7 +14,7 @@
 import numpy as np
 import os
 from yt.funcs import *
-from yt import units
+from yt.units.yt_array import YTQuantity
 import h5py
 
 try:
@@ -29,16 +29,17 @@
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
-hc = (hcgs*clight).in_units("keV*angstrom")
-cm3 = units.cm*units.cm*units.cm
+hc = (hcgs*clight).in_units("keV*angstrom").v
+cl = clight.v
+K = 1.0/np.sqrt(2.*np.pi)
 
 class SpectralModel(object):
 
     def __init__(self, emin, emax, nchan):
-        self.emin = emin*units.keV
-        self.emax = emax*units.keV
+        self.emin = YTQuantity(emin, "keV")
+        self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(emin, emax, nchan+1)*units.keV
+        self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
         
@@ -68,8 +69,9 @@
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan):
+    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
         self.model_name = model_name
+        self.thermal_broad = thermal_broad
         SpectralModel.__init__(self, emin, emax, nchan)
         
     def prepare(self):
@@ -80,27 +82,29 @@
         xspec.AllModels.setEnergies("%f %f %d lin" %
                                     (self.emin.value, self.emax.value, self.nchan))
         self.model = xspec.Model(self.model_name)
+        self.thermal_comp = getattr(self.model,self.model_name)
         if self.model_name == "bremss":
             self.norm = 3.02e-15
         else:
             self.norm = 1.0e-14
-        
+        self.thermal_comp.norm = 1.0
+        self.thermal_comp.Redshift = 0.0
+        if self.thermal_broad:
+            xspec.Xset.addModelString("APECTHERMAL","yes")
+
     def get_spectrum(self, kT):
         """
         Get the thermal emission spectrum given a temperature *kT* in keV. 
         """
-        m = getattr(self.model,self.model_name)
-        m.kT = kT
-        m.Abundanc = 0.0
-        m.norm = 1.0
-        m.Redshift = 0.0
+        self.thermal_comp.kT = kT
+        self.thermal_comp.Abundanc = 0.0
         cosmic_spec = self.norm*np.array(self.model.values(0))
-        m.Abundanc = 1.0
         if self.model_name == "bremss":
-            metal_spec = np.zeros((self.nchan))
+            metal_spec = np.zeros(self.nchan)
         else:
+            self.thermal_comp.Abundanc = 1.0
             metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
-        return cosmic_spec*cm3/units.s, metal_spec*cm3/units.s
+        return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
         
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -188,7 +192,7 @@
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.wvbins = (hc/self.ebins[::-1]).ndarray_view()
+        self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
         # Non-trace metals
@@ -230,12 +234,12 @@
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc.value/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
         amp = self.line_handle[tindex].data.field('epsilon')[i]
-        ebins = self.ebins.ndarray_view()
+        ebins = self.ebins.d
         if self.thermal_broad:
             vec = np.zeros(self.nchan)
-            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/clight.value
+            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/cl
             for E, sig, a in zip(E0, sigma, amp):
                 cdf = stats.norm(E,sig).cdf(ebins)
                 vec += np.diff(cdf)*a
@@ -255,13 +259,13 @@
         e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
-        tmpspec += np.interp(self.emid.ndarray_view(), e_cont, continuum)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
         
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
         e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
         
-        tmpspec += np.interp(self.emid.ndarray_view(), e_pseudo, pseudo)*self.de.ndarray_view()
+        tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
         
         return tmpspec
 
@@ -275,7 +279,7 @@
         mspec_r = np.zeros(self.nchan)
         tindex = np.searchsorted(self.Tvals, kT)-1
         if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
-            return cspec_l*cm3/units.s, mspec_l*cm3/units.s
+            return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s")
         dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
@@ -285,8 +289,8 @@
         for elem in self.metal_elem:
             mspec_l += self._make_spectrum(elem, tindex+2)
             mspec_r += self._make_spectrum(elem, tindex+3)
-        cosmic_spec = (cspec_l*(1.-dT)+cspec_r*dT)*cm3/units.s
-        metal_spec = (mspec_l*(1.-dT)+mspec_r*dT)*cm3/units.s
+        cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
+        metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
         return cosmic_spec, metal_spec
 
 class TableAbsorbModel(SpectralModel):
@@ -313,11 +317,11 @@
         f = h5py.File(self.filename,"r")
         emin = f["energy"][:].min()
         emax = f["energy"][:].max()
-        self.sigma = f["cross_section"][:]*units.cm*units.cm
+        self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
         SpectralModel.__init__(self, emin, emax, nchan)
-        self.nH = nH*1.0e22/(units.cm*units.cm)
+        self.nH = YTQuantity(nH*1.0e22, "cm**-2")
         
     def prepare(self):
         """

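For illustration (not from the commit): how the table model deposits a thermally broadened
line into the energy bins, using differences of a Gaussian CDF as in ``_make_spectrum``
above. The line energy, amplitude, and width are made up.

    import numpy as np
    from scipy import stats

    ebins = np.linspace(6.0, 8.0, 2001)      # keV bin edges
    E0, amp, sigma = 6.7, 1.0, 5.0e-3        # toy line energy, strength, thermal width

    cdf = stats.norm(E0, sigma).cdf(ebins)
    vec = np.diff(cdf) * amp                 # counts deposited per bin
    print(vec.sum())                         # ~amp: the whole line lies in range
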
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -26,8 +26,11 @@
 import ppv_utils
 from yt.funcs import is_root
 
-def create_vlos(normal):
-    if isinstance(normal, basestring):
+def create_vlos(normal, no_shifting):
+    if no_shifting:
+        def _v_los(field, data):
+            return data.ds.arr(data["zeros"], "cm/s")
+    elif isinstance(normal, basestring):
         def _v_los(field, data): 
             return -data["velocity_%s" % normal]
     else:
@@ -48,7 +51,7 @@
 class PPVCube(object):
     def __init__(self, ds, normal, field, center="c", width=(1.0,"unitary"),
                  dims=(100,100,100), velocity_bounds=None, thermal_broad=False,
-                 atomic_weight=56., method="integrate"):
+                 atomic_weight=56., method="integrate", no_shifting=False):
         r""" Initialize a PPVCube object.
 
         Parameters
@@ -88,6 +91,9 @@
             Set the projection method to be used.
             "integrate" : line of sight integration over the line element.
             "sum" : straight summation over the line of sight.
+        no_shifting : boolean, optional
+            If set, no velocity shifting is applied; only thermal broadening occurs.
+            Should not be set when *thermal_broad* is False, since then nothing happens!
 
         Examples
         --------
@@ -102,6 +108,10 @@
         self.width = width
         self.particle_mass = atomic_weight*mh
         self.thermal_broad = thermal_broad
+        self.no_shifting = no_shifting
+
+        if no_shifting and not thermal_broad:
+            raise RuntimeError("no_shifting cannot be True when thermal_broad is False!")
 
         self.center = ds.coordinates.sanitize_center(center, normal)[0]
 
@@ -135,7 +145,7 @@
 
         self.current_v = 0.0
 
-        _vlos = create_vlos(normal)
+        _vlos = create_vlos(normal, self.no_shifting)
         self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")
 
         _intensity = self.create_intensity()
@@ -287,7 +297,7 @@
         w.wcs.cunit = [units,units,vunit]
         w.wcs.ctype = [types[0],types[1],vtype]
 
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib = FITSImageBuffer(self.data.transpose(2,0,1), fields=self.field, wcs=w)
         fib[0].header["bunit"] = re.sub('()', '', str(self.proj_units))
         fib[0].header["btype"] = self.field
 

diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -28,9 +28,13 @@
     mpc_conversion, sec_conversion
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
+from yt.utilities.decompose import \
+    decompose_array, get_psize
 
 def _get_convert(fname):
     def _conv(data):
@@ -39,7 +43,8 @@
 
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index, level, start, dimensions):
+    def __init__(self, id, index, level, start, dimensions,
+                 file_offset, read_dims):
         df = index.dataset.filename[4:-4]
         gname = index.grid_filenames[id]
         AMRGridPatch.__init__(self, id, filename = gname,
@@ -51,6 +56,8 @@
         self.start_index = start.copy()
         self.stop_index = self.start_index + dimensions
         self.ActiveDimensions = dimensions.copy()
+        self.file_offset = file_offset
+        self.read_dims = read_dims
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -172,7 +179,7 @@
         self._field_map = field_map
 
     def _count_grids(self):
-        self.num_grids = self.dataset.nvtk
+        self.num_grids = self.dataset.nvtk*self.dataset.nprocs
 
     def _parse_index(self):
         f = open(self.index_filename,'rb')
@@ -220,7 +227,6 @@
         gridlistread = [fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots]
         self.num_grids = len(gridlistread)
         dxs=[]
-        self.grids = np.empty(self.num_grids, dtype='object')
         levels = np.zeros(self.num_grids, dtype='int32')
         glis = np.empty((self.num_grids,3), dtype='float64')
         gdds = np.empty((self.num_grids,3), dtype='float64')
@@ -292,24 +298,66 @@
             self.dataset.domain_dimensions[2] = np.int(1)
         if self.dataset.dimensionality == 1 :
             self.dataset.domain_dimensions[1] = np.int(1)
-        for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i,self,levels[i],
-                                      glis[i],
-                                      gdims[i])
-            dx = (self.dataset.domain_right_edge-
-                  self.dataset.domain_left_edge)/self.dataset.domain_dimensions
-            dx = dx/self.dataset.refine_by**(levels[i])
-            dxs.append(dx)
 
-        dx = self.ds.arr(dxs, "code_length")
         dle = self.dataset.domain_left_edge
         dre = self.dataset.domain_right_edge
-        self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12), "code_length")
-        self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
-                                                    dx*self.grid_dimensions,
-                                                    decimals=12),
-                                            "code_length")
+        dx_root = (self.dataset.domain_right_edge-
+                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
+
+        if self.dataset.nprocs > 1:
+            gle_all = []
+            gre_all = []
+            shapes_all = []
+            levels_all = []
+            new_gridfilenames = []
+            file_offsets = []
+            read_dims = []
+            for i in range(levels.shape[0]):
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                gle_orig = self.ds.arr(np.round(dle + dx*glis[i], decimals=12),
+                                       "code_length")
+                gre_orig = self.ds.arr(np.round(gle_orig + dx*gdims[i], decimals=12),
+                                       "code_length")
+                bbox = np.array([[le,re] for le, re in zip(gle_orig, gre_orig)])
+                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
+                gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
+                gle_all += gle
+                gre_all += gre
+                shapes_all += shapes
+                levels_all += [levels[i]]*self.dataset.nprocs
+                new_gridfilenames += [self.grid_filenames[i]]*self.dataset.nprocs
+                file_offsets += [[slc[0].start, slc[1].start, slc[2].start] for slc in slices]
+                read_dims += [np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int") for shape in shapes]
+            self.num_grids *= self.dataset.nprocs
+            self.grids = np.empty(self.num_grids, dtype='object')
+            self.grid_filenames = new_gridfilenames
+            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
+            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
+            self.grid_dimensions = np.array([shape for shape in shapes_all],
+                                            dtype="int32")
+            gdds = (self.grid_right_edge-self.grid_left_edge)/self.grid_dimensions
+            glis = np.round((self.grid_left_edge - self.ds.domain_left_edge)/gdds).astype('int')
+            for i in range(self.num_grids):
+                self.grids[i] = self.grid(i,self,levels_all[i],
+                                          glis[i], shapes_all[i],
+                                          file_offsets[i], read_dims[i])
+        else:
+            self.grids = np.empty(self.num_grids, dtype='object')
+            for i in range(levels.shape[0]):
+                self.grids[i] = self.grid(i,self,levels[i],
+                                          glis[i], gdims[i], [0]*3,
+                                          gdims[i])
+                dx = dx_root/self.dataset.refine_by**(levels[i])
+                dxs.append(dx)
+
+            dx = self.ds.arr(dxs, "code_length")
+            self.grid_left_edge = self.ds.arr(np.round(dle + dx*glis, decimals=12),
+                                              "code_length")
+            self.grid_dimensions = gdims.astype("int32")
+            self.grid_right_edge = self.ds.arr(np.round(self.grid_left_edge +
+                                                        dx*self.grid_dimensions,
+                                                        decimals=12),
+                                               "code_length")
         
         if self.dataset.dimensionality <= 2:
             self.grid_right_edge[:,2] = dre[2]
@@ -347,6 +395,14 @@
         mask[grid_ind] = True
         return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
 
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
 class AthenaDataset(Dataset):
     _index_class = AthenaHierarchy
     _field_info_class = AthenaFieldInfo
@@ -354,8 +410,9 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None):
+                 units_override=None, nprocs=1):
         self.fluid_types += ("athena",)
+        self.nprocs = nprocs
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters
@@ -435,6 +492,8 @@
             dimensionality = 1
         if dimensionality <= 2 : self.domain_dimensions[2] = np.int32(1)
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
+        if dimensionality != 3 and self.nprocs > 1:
+            raise RuntimeError("Virtual grids are only supported for 3D outputs!")
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self.parameter_filename.__hash__()

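The data_structures.py changes add an nprocs keyword to AthenaDataset: each physical grid in the VTK output is split into nprocs virtual grids with decompose_array, each carrying a file_offset and read_dims so that its data can be read independently, and a new _chunk_io hands the virtual grids out as separate IO chunks. Virtual grids are only supported for 3D outputs. A minimal loading sketch, mirroring the new test added below (the unit overrides are illustrative, since Athena VTK files carry no unit information):

    import yt

    units = {"length_unit": (1.0, "Mpc"),
             "time_unit": (1.0, "Myr"),
             "mass_unit": (1.0e14, "Msun")}

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 units_override=units, nprocs=8)

    # Every grid in the file has been decomposed into 8 virtual grids.
    print(ds.index.num_grids)
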
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -17,6 +17,9 @@
 import numpy as np
 from yt.funcs import mylog, defaultdict
 
+float_size = np.dtype(">f4").itemsize
+axis_list = ["_x","_y","_z"]
+
 class IOHandlerAthena(BaseIOHandler):
     _dataset_type = "athena"
     _offset_string = 'data:offsets=0'
@@ -33,40 +36,40 @@
 
     def _read_chunk_data(self,chunk,fields):
         data = {}
-        grids_by_file = defaultdict(list)
         if len(chunk.objs) == 0: return data
-        field_list = set(f[1] for f in fields)
         for grid in chunk.objs:
             if grid.filename is None:
                 continue
             f = open(grid.filename, "rb")
             data[grid.id] = {}
-            grid_ncells = np.prod(grid.ActiveDimensions)
             grid_dims = grid.ActiveDimensions
-            grid0_ncells = np.prod(grid.index.grid_dimensions[0,:])
+            read_dims = grid.read_dims
+            grid_ncells = np.int(np.prod(read_dims))
+            grid0_ncells = np.int(np.prod(grid.index.grids[0].read_dims))
             read_table_offset = get_read_table_offset(f)
-            for field in self.ds.field_list:
+            for field in fields:
                 dtype, offsetr = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
-                f.seek(read_table_offset+offset)
+                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size
+                xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
+                yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
+                f.seek(read_table_offset+offset+file_offset)
                 if dtype == 'scalar':
+                    f.seek(read_table_offset+offset+file_offset)
                     v = np.fromfile(f, dtype='>f4',
-                                    count=grid_ncells).reshape(grid_dims,order='F')
+                                    count=grid_ncells).reshape(read_dims,order='F')
                 if dtype == 'vector':
+                    vec_offset = axis_list.index(field[-1][-2:])
+                    f.seek(read_table_offset+offset+3*file_offset)
                     v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
-                if '_x' in field[-1]:
-                    v = v[0::3].reshape(grid_dims,order='F')
-                elif '_y' in field[-1]:
-                    v = v[1::3].reshape(grid_dims,order='F')
-                elif '_z' in field[-1]:
-                    v = v[2::3].reshape(grid_dims,order='F')
+                    v = v[vec_offset::3].reshape(read_dims,order='F')
                 if grid.ds.field_ordering == 1:
-                    data[grid.id][field] = v.T.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].T.astype("float64")
                 else:
-                    data[grid.id][field] = v.astype("float64")
+                    data[grid.id][field] = v[xread,yread,:].astype("float64")
             f.close()
         return data
     

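With virtual grids, io.py no longer reads each VTK grid in full: the file pointer is advanced past file_offset[2] complete x-y planes to reach the virtual grid's z-slab, only read_dims cells are read, and the x/y sub-block is sliced out in memory afterwards. A small sketch of the same seek arithmetic for a big-endian float32 field stored in Fortran (x-fastest) order; the function and its arguments are illustrative:

    import numpy as np

    float_size = np.dtype(">f4").itemsize

    def read_slab(f, table_offset, field_offset, nx, ny, zstart, nz_slab):
        # Skip zstart complete x-y planes to reach the start of the slab,
        # then read nx*ny*nz_slab values and reshape in Fortran order.
        f.seek(table_offset + field_offset + zstart * nx * ny * float_size)
        count = nx * ny * nz_slab
        return np.fromfile(f, dtype=">f4",
                           count=count).reshape((nx, ny, nz_slab), order="F")
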
diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -20,6 +20,8 @@
     big_patch_amr, \
     data_dir_load
 from yt.frontends.athena.api import AthenaDataset
+from yt.config import ytcfg
+from yt.convenience import load
 
 _fields_cloud = ("scalar[0]", "density", "total_energy")
 
@@ -58,6 +60,31 @@
         test_stripping.__name__ = test.description
         yield test
 
+sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
+
+uo_sloshing = {"length_unit":(1.0,"Mpc"),
+               "time_unit":(1.0,"Myr"),
+               "mass_unit":(1.0e14,"Msun")}
+
+@requires_file(sloshing)
+def test_nprocs():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds1 = load(sloshing, units_override=uo_sloshing)
+    sp1 = ds1.sphere("c", (100.,"kpc"))
+    prj1 = ds1.proj("density",0)
+    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
+    sp2 = ds2.sphere("c", (100.,"kpc"))
+    prj2 = ds2.proj("density",0)
+
+    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_allclose, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    for ax in "xyz":
+        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
+    yield assert_allclose, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
+    yield assert_equal, prj1["density"], prj2["density"]
+
+    ytcfg["yt","skip_dataset_cache"] = "False"
 
 @requires_file(cloud)
 def test_AthenaDataset():

diff -r 7e6c439c67cae08f7430f2b5a03896e6d6d0a512 -r 863eeb36693d8eb136d06ac9a9418e1e67a7a77f yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -94,6 +94,7 @@
     "arcmin": (np.pi/10800., dimensions.angle), # arcminutes
     "arcsec": (np.pi/648000., dimensions.angle), # arcseconds
     "mas": (np.pi/648000000., dimensions.angle), # millarcseconds
+    "hourangle": (np.pi/12., dimensions.angle), # hour angle
     "steradian": (1.0, dimensions.solid_angle),
 
     # misc

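The hourangle entry defines one hour angle as pi/12 radians, i.e. 15 degrees (24 hours of right ascension span the full 360 degrees). A quick check with YTQuantity:

    from yt.units.yt_array import YTQuantity

    ra = YTQuantity(1.0, "hourangle")
    print(ra.in_units("degree"))  # 15.0 degree
    print(ra.in_units("radian"))  # ~0.2618 radian (pi/12)
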
Repository URL: https://bitbucket.org/yt_analysis/yt/
